Columns: repo | file | code | file_length | avg_line_length | max_line_length | extension_type

repo: NMTGMinor
file: NMTGMinor-master/pretrain_module/configuration_bert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration """
import logging
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
"bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
"cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/config.json",
"cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/config.json",
"cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/config.json",
"TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
"wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
It is used to instantiate a BERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that
can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.
hidden_size (:obj:`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, optional, defaults to False):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
Example::

>>> from pretrain_module.configuration_bert import BertConfig

>>> # Initializing a bert-base-uncased style configuration
>>> configuration = BertConfig()

>>> # Overriding some of the defaults
>>> configuration = BertConfig(num_hidden_layers=6)
"""
model_type = "bert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = gradient_checkpointing
file_length: 8,448 | avg_line_length: 64.496124 | max_line_length: 181 | extension_type: py

repo: NMTGMinor
file: NMTGMinor-master/pretrain_module/modeling_utils.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
from functools import partial
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor, device, dtype, nn
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
WEIGHTS_NAME,
)
# from .generation_utils import GenerationMixin
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
def find_pruneable_heads_and_indices(
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
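# Illustrative example with hypothetical numbers: n_heads=4, head_size=3, pruning head 2
# when head 0 is already pruned. Head 2 shifts down by one slot in the already-compacted
# layout, so row 1 of the mask is zeroed and `index` keeps the flat dimensions of the
# surviving heads:
#
#     heads, index = find_pruneable_heads_and_indices(
#         heads=[2], n_heads=4, head_size=3, already_pruned_heads={0}
#     )
#     # heads == {2}
#     # index == tensor([0, 1, 2, 6, 7, 8, 9, 10, 11])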
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
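# Illustrative usage (hypothetical model): freeze the embeddings, then compare counts.
#
#     model.get_input_embeddings().requires_grad_(False)
#     trainable = model.num_parameters(only_trainable=True)  # excludes frozen weights
#     total = model.num_parameters()                         # includes everything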
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def dtype(self) -> dtype:
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
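# Illustrative example (hypothetical tensor): a 2D encoder mask [[1, 1, 0]] is broadcast
# to shape [1, 1, 1, 3] and inverted into an additive mask [[0.0, 0.0, -1e9]] in fp32
# (-1e4 in fp16). Added to the raw attention scores before the softmax, the padded
# position gets ~zero attention probability.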
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
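# Illustrative causal case (hypothetical values): for a decoder with seq_length=3 and
# attention_mask [[1, 1, 1]], the intermediate causal mask is lower-triangular,
#
#     [[1, 0, 0],
#      [1, 1, 0],
#      [1, 1, 1]]
#
# so after the (1.0 - mask) * -10000.0 transform every future position receives -10000
# and is effectively removed from the softmax.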
def get_head_mask(
self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the attention scores are computed chunk by chunk.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]`
or list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype)  # switch to float if needed + fp16 compatibility
return head_mask
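# Shape walkthrough (illustrative): a 1D head_mask of shape [num_heads] becomes
# [num_hidden_layers, 1, num_heads, 1, 1], shared across layers; a 2D mask of shape
# [num_hidden_layers, num_heads] becomes the same 5D shape but with a per-layer mask.
# Both broadcast over the batch and the two sequence-length dimensions of the attention
# probabilities.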
# class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a
PyTorch model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated
to the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
- **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re pattern of tensor names to ignore
when loading the model (and avoid unnecessary warnings).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
_auto_class = None
_no_split_modules = None
_keep_in_fp32_modules = None
# a list of `re` patterns of `state_dict` keys that should be removed from the list of missing
# keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings.
_keys_to_ignore_on_load_missing = None
# a list of `re` patterns of `state_dict` keys that should be removed from the list of
# unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary
# warnings.
_keys_to_ignore_on_load_unexpected = None
# a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't
# trained, but which are either deterministic or tied variables)
_keys_to_ignore_on_save = None
is_parallelizable = False
supports_gradient_checkpointing = False
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
"""
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self) -> nn.Module:
"""
:obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def post_init(self):
"""
A method executed at the end of each Transformer model initialization, to execute code that needs the model's
modules properly initialized (such as weight initialization).
"""
self.init_weights()
# skipped since this backward-compatibility path is never used in this repo
# self._backward_compatibility_gradient_checkpointing()
def _backward_compatibility_gradient_checkpointing(self):
if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
self.gradient_checkpointing_enable()
# Remove the attribute now that it has been consumed, so it's not saved in the config.
delattr(self.config, "gradient_checkpointing")
def gradient_checkpointing_enable(self):
"""
Activates gradient checkpointing for the current model.
Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
activations".
"""
if not self.supports_gradient_checkpointing:
raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
self.apply(partial(self._set_gradient_checkpointing, value=True))
def set_input_embeddings(self, value: nn.Module):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
clone the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
print("Tie the weights between the input embeddings and the output embeddings is done in ptetrained model")
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of whether we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
# self.tie_weights()
return model_embeds
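# Illustrative usage (hypothetical sizes): growing the vocabulary by 8 tokens copies the
# existing rows, freshly initializes the new ones, and updates config.vocab_size:
#
#     embeddings = model.resize_token_embeddings(model.config.vocab_size + 8)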
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list
of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will
prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
print("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False):
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(model_to_save.state_dict(), output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(model_to_save.state_dict(), output_model_file)
print("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, state_dict, *model_args, **kwargs):
model = kwargs.pop("model", None)
output_loading_info = kwargs.pop("output_loading_info", False)
model_prefix = kwargs.pop("model_prefix", False)
missing_keys = []
unexpected_keys = []
error_msgs = []
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(model_prefix) for s in state_dict.keys())
if not hasattr(model, model_prefix) and has_prefix_module:
start_prefix = model_prefix + "."
if hasattr(model, model_prefix) and not has_prefix_module:
model_to_load = getattr(model, model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(start_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if output_loading_info:
if len(missing_keys) > 0:
print("Some weights of the model were not initialized from the pretrained model")
print("missing_keys:", missing_keys)
else:
print("All the weights of the model were initialized from the pretrained model")
if len(unexpected_keys) > 0:
print("Some weights of the pretrained model were not used")
print("unexpected_keys:", unexpected_keys)
else:
print("All the weights of the pretrained model checkpoint were used")
if len(error_msgs) > 0:
print("error_msgs:", error_msgs)
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# When we use it as an encoder, there are no weights to tie
model.tie_weights() # make sure token embedding weights are still tied if needed
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
return model
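# Illustrative usage (hypothetical file and class names): unlike the HuggingFace
# original, this from_pretrained variant takes an in-memory state_dict plus the target
# model, rather than a model identifier:
#
#     state_dict = torch.load("checkpoint.pt", map_location="cpu")
#     model = SomeBertModel(config)
#     model = SomeBertModel.from_pretrained(state_dict, model=model, model_prefix="bert")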
file_length: 31,118 | avg_line_length: 44.830633 | max_line_length: 128 | extension_type: py

repo: NMTGMinor
file: NMTGMinor-master/pretrain_module/tokenization.py
import onmt.markdown
import argparse
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-data_file', default="",
help="Path to the data")
parser.add_argument('-plm_vocab_file', default="", type=str,
help="""Path of vocab file""")
parser.add_argument('-lang', default='en',
help='language [en|ch|others]')
parser.add_argument('-plm_type', default='roberta',
help='pretrain_mode [roberta|bert|others]')
parser.add_argument('-pretrain_tokenizer', default='roberta-base',
help='which tokenizer is used')
parser.add_argument('-add_special_tok', action='store_true',
help="""add special tokens at the beginning and at the end of each sentence""")
parser.add_argument('-special_bos', default='<s>',
help='special bos token.')
parser.add_argument('-special_eos', default='</s>',
help='special eos token.')
opt = parser.parse_args()
def make_vocab_dict(vocab_file, lang):
"""Loads a vocabulary file into a dictionary."""
index = 0
vocab = open(vocab_file, "r")
word2idx = open(opt.plm_type + "_word2idx."+lang, "w")
idx2word = open(opt.plm_type + "_idx2word."+lang, "w")
while True:
word = vocab.readline()
# the last line
if not word:
break
word = word.strip()
idx2word.write(str(index) + " " + word + "\n")
word2idx.write(word + " " + str(index) + "\n")
index += 1
vocab.close()
word2idx.close()
idx2word.close()
def tokenize_data(raw_data, tokenizer):
with open(raw_data, "r", encoding="utf-8") as f_raw:
tokenized_sents = []
for line in f_raw:
sent = line.strip()
tokenized_sent = tokenizer.tokenize(sent)
if opt.add_special_tok:
tokenized_sent.insert(0, opt.special_bos)
tokenized_sent.append(opt.special_eos)
tokenized_sents.append(tokenized_sent)
new_data = raw_data + "." + opt.plm_type + ".tok"
with open(new_data, "w", encoding="utf-8") as f_tok:
for sent in tokenized_sents:
sent = " ".join(sent)
f_tok.write(sent)
f_tok.write('\n')
def main():
# step1: make dictionary
make_vocab_dict(opt.plm_vocab_file, opt.lang)
# step2: tokenization
if opt.plm_type == "bert":
from pytorch_pretrained_bert import BertTokenizer
# "en": bert-base-uncased "ch": bert-base-chinese
tokenizer = BertTokenizer.from_pretrained(opt.pretrain_tokenizer)
elif opt.plm_type == "roberta":
from pretrain_module.roberta_tokenization_ch import FullTokenizer
from transformers import RobertaTokenizer
# "en": roberta-base
if opt.lang != "ch":
tokenizer = RobertaTokenizer.from_pretrained(opt.pretrain_tokenizer)
else:
tokenizer = FullTokenizer(opt.plm_vocab_file)
elif opt.plm_type.lower() == "bart":
from transformers import BartTokenizer
tokenizer = BartTokenizer.from_pretrained(opt.pretrain_tokenizer)
else:
print("Tokenization with this pretrained model is not supported right now.")
exit(-1)
if opt.data_file != "":
tokenize_data(opt.data_file, tokenizer)
if __name__ == "__main__":
main()
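# Example invocation (illustrative; file paths are placeholders):
#
#   python tokenization.py -data_file data/train.en -plm_vocab_file bert_vocab.txt \
#       -lang en -plm_type bert -pretrain_tokenizer bert-base-uncased -add_special_tok \
#       -special_bos '[CLS]' -special_eos '[SEP]'
#
# This writes bert_word2idx.en and bert_idx2word.en to the working directory and the
# tokenized corpus to data/train.en.bert.tok.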
file_length: 3,440 | avg_line_length: 34.112245 | max_line_length: 99 | extension_type: py

repo: NMTGMinor
file: NMTGMinor-master/pretrain_module/modeling_deltalm.py
# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MBART model. """
import copy
import math
import random
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import numpy as np
from torch.nn import CrossEntropyLoss, MSELoss
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.self_attention_func import self_attn_func
from onmt.modules.optimized.encdec_attention_func_bias import encdec_attn_bias_func
from onmt.modules.dropout import embedded_dropout
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import linear_function
from torch.cuda.amp import custom_fwd, custom_bwd
from .activations import ACT2FN
from .modeling_outputs import (
BaseModelOutput,
)
from .modeling_utils import PreTrainedModel
from .modeling_mbart import MBartLearnedPositionalEmbedding, MBartAttention, MBartCrossAttention
# from ...utils import logging
# from .configuration_bart import BartConfig
import onmt
from collections import defaultdict
from .configuration_deltalm import DeltaLMConfig
_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
_CONFIG_FOR_DOC = "DeltaLMConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"
MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/mbart-large-cc25",
# See all MBART models at https://huggingface.co/models?filter=mbart
]
class IndexCopy(torch.autograd.Function):
"""
This function is loosely similar to rnn pad_packed_sequence:
it scatters the non-padded rows of a packed tensor back into a zero-padded tensor
"""
@staticmethod
@custom_fwd
def forward(ctx, input, non_pad_indices, total_batch_size):
"""
:param ctx:
:param input: 2D [bsz x ... ] bsz is the total number of elements after unpadding
:param non_pad_indices: bsz * seq_len
:param total_batch_size: (int) bsz * seq_len (before unpadding) > bsz
:return:
In the forward pass we create a new zero tensor and copy the inputs into it based on non_pad_indices
"""
sizes = list(input.size())
sizes[0] = total_batch_size
output = input.new_zeros(*sizes)
output.index_copy_(0, non_pad_indices, input)
ctx.save_for_backward(non_pad_indices)
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grads):
"""
:param ctx:
:param output_grads:
:return:
In the backward pass we simply gather the output gradients at the non-padded indices
"""
non_pad_indices, = ctx.saved_tensors
grad_input = output_grads.index_select(0, non_pad_indices)
return grad_input, None, None
index_copy = IndexCopy.apply
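# Illustrative round trip (hypothetical shapes): scattering 3 non-padded token states
# back into a padded batch of bsz * seq_len = 5 flat positions leaves zeros at the pads:
#
#     packed = torch.randn(3, 8)
#     non_pad_indices = torch.tensor([0, 2, 3])
#     padded = index_copy(packed, non_pad_indices, 5)  # shape [5, 8]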
class DeltaLMEncoderLayer(nn.Module):
def __init__(self, config: DeltaLMConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MBartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.activation_fn_name = config.activation_function
self.normalize_before = config.normalize_before
# Optimization
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
max_len=-1, cu_seqlens=None,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
where padding elements are indicated by very large negative values.
output_attentions (:obj:`bool`, `optional`):
:param output_attentions: Whether or not to return the attentions tensors of all attention layers.
:param attention_mask: `(batch, src_len)`
:param hidden_states: `(seq_len, batch, embed_dim)`
:param cu_seqlens:
:param max_len:
"""
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
cu_seqlens=cu_seqlens,
max_len=max_len
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Second block (FFN)
residual = hidden_states
if self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
if self.fused and hidden_states.is_cuda:
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
dropout = self.activation_dropout if self.training else 0.0
hidden_states = self.fused_function(dropout, False, hidden_states, *weights, *biases).type_as(hidden_states)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
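# Ordering sketch for this layer: with config.normalize_before=True (pre-norm) each block
# computes
#     x = x + Dropout(Sublayer(LayerNorm(x)))
# while with normalize_before=False (post-norm, the original Transformer ordering) it is
#     x = LayerNorm(x + Dropout(Sublayer(x)))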
class DeltaLMDecoderLayer(nn.Module):
def __init__(self, config: DeltaLMConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MBartAttention(  # alternative: MBartAutoRegressiveSelfAttentionSLow
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = MBartCrossAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout
)
self.normalize_before = config.normalize_before
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.fc3 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc4 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.ffn_layer_norm = LayerNorm(self.embed_dim)
self.is_factorized = False
self.multiplicative_factorize = False
self.fast_factorize = False
self.ffn_dim = config.decoder_ffn_dim
self.n_languages = -1
self.has_adapter = False
self.adapter_location = -1
# Optimization
self.activation_fn_name = config.activation_function
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
@property
def word_lut(self):
return self.embed_tokens
def freeze_self_attn_params(self):
self.self_attn.q_proj.weight.requires_grad = False
self.self_attn.k_proj.weight.requires_grad = False
self.self_attn.v_proj.weight.requires_grad = False
self.self_attn.out_proj.weight.requires_grad = False
self.self_attn.q_proj.bias.requires_grad = False
self.self_attn.k_proj.bias.requires_grad = False
self.self_attn.v_proj.bias.requires_grad = False
self.self_attn.out_proj.bias.requires_grad = False
def freeze_ffn_params(self):
self.fc1.weight.requires_grad = False
self.fc2.weight.requires_grad = False
self.fc1.bias.requires_grad = False
self.fc2.bias.requires_grad = False
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
self.self_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative, fast=fast, dyrank=dyrank)
self.encoder_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative, fast=fast, dyrank=dyrank)
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
nn.init.normal_(self.r_i, 0.0, 0.02)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
if multiplicative:
rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
constant = math.sqrt(1.0 / rank) if fast else 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
self.r2_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.s2_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.r2_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.s2_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
nn.init.normal_(self.r2_i, 0.0, 0.02)
nn.init.normal_(self.s2_i, 0.0, 0.02)
nn.init.normal_(self.r2_o, 0.0, 0.02)
nn.init.normal_(self.s2_o, 0.0, 0.02)
if multiplicative:
rank = rank if fast else 1
self.rm2_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.sm2_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.rm2_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.sm2_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
constant = math.sqrt(1.0 / rank) if fast else 1
nn.init.constant_(self.rm2_i, constant)
nn.init.constant_(self.sm2_i, constant)
nn.init.constant_(self.rm2_o, constant)
nn.init.constant_(self.sm2_o, constant)
def add_adapters(self, n_languages, downsampling_factor=4, adapter_location=1):
"""
:param n_languages: one adapter per language
:param downsampling_factor: downsampling rate size for the hidden layer
:param adapter_location:
:return:
"""
self.n_languages = n_languages
self.has_adapter = True
self.adapter_location = adapter_location
from .adapter import MultilingualAdapter
self.adapter = MultilingualAdapter(n_languages, self.embed_dim, downsample_factor=downsampling_factor)
def get_mlp_weights(self, lang=None, atb=None):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
if lang is not None:
if self.is_factorized:
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.fast_factorize:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
return in_weight, out_weight, in_bias, out_bias
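# Factorization sketch (notation local to this comment): with fast_factorize enabled, the
# effective per-language weight used by call_mlp is
#     W_eff = W * (rm_l^T @ sm_l) + (r_l^T @ s_l)
# i.e. a multiplicative low-rank rescaling of the shared weight followed by an additive
# low-rank update; without fast_factorize the same terms are built as sums of rank-1
# outer products.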
def get_interleaved_mlp_weights(self, lang=None, atb=None):
in_weight = self.fc3.weight
out_weight = self.fc4.weight
in_bias = self.fc3.bias
out_bias = self.fc4.bias
if lang is not None:
if self.is_factorized:
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm2_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm2_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm2_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm2_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
r_i = torch.index_select(self.r2_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s2_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r2_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s2_o, 0, lang).squeeze(0)
if self.fast_factorize:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
return in_weight, out_weight, in_bias, out_bias
def call_mlp(self, x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
fused, fused_function):
"""
Move the MLP section to a different function to choose between pytorch and custom mlp
:param x:
:param in_weight:
:param out_weight:
:param in_bias:
:param out_bias:
:param activation_fn:
:param dropout_p:
:param training_:
:param fused:
:param fused_function:
:return:
"""
# TODO: check type x torch.half or torch.float32
if fused and x.is_cuda:
dropout_p_ = dropout_p if training_ else 0.0
weights = [in_weight, out_weight]
biases = [in_bias, out_bias]
x = fused_function(dropout_p_, False, x, *weights, *biases)
else:
x = F.linear(x, in_weight, in_bias)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = F.linear(x, out_weight, out_bias)
return x
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
sub_encoder_hidden_states: Optional[torch.Tensor] = None,
sub_encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
incremental: Optional[bool] = False,
incremental_cache=None,
lang=None, atb=None
):
"""
:param hidden_states:
:param attention_mask:
:param encoder_hidden_states:
:param encoder_attention_mask:
:param sub_encoder_hidden_states:
:param sub_encoder_attention_mask:
:param output_attentions:
:param incremental:
:param incremental_cache:
:param lang:
:param atb:
:return:
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
# hidden_states = hidden_states.transpose(0, 1).contiguous()
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
# self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
lang=lang, atb=atb
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
###############################################
# Interleaved FFN block
residual = hidden_states
if self.normalize_before:
hidden_states = self.ffn_layer_norm(hidden_states)
in_weight, out_weight, in_bias, out_bias = self.get_interleaved_mlp_weights(lang=lang, atb=atb)
hidden_states = self.call_mlp(hidden_states, in_weight, out_weight, in_bias, out_bias,
self.activation_fn, self.activation_dropout, self.training,
self.fused, self.fused_function)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.ffn_layer_norm(hidden_states)
###############################################
# Cross-Attention Block
cross_attn_weights = None
contrastive_loss = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.normalize_before:
hidden_states = self.encoder_attn_layer_norm(hidden_states)
attention_input = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
# cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, incremental_cache = self.encoder_attn(
hidden_states=attention_input,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
lang=lang, atb=atb
)
# perform cross-attention on the sub-hidden states
# if sub_encoder_hidden_states is not None:
# sub_hidden_states, sub_cross_attn_weights, _ = self.encoder_attn(
# hidden_states=attention_input,
# key_value_states=sub_encoder_hidden_states,
# attention_mask=sub_encoder_attention_mask,
# output_attentions=output_attentions,
# incremental=False, incremental_cache=None,
# lang=lang, mixture=mixture
# )
#
# # t x b x h -> sum to 1
# contrastive_loss = F.mse_loss(hidden_states.float(), sub_hidden_states.float(), reduction='none')
#
# else:
contrastive_loss = None
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.encoder_attn_layer_norm(hidden_states)
###############################################
# Fully Connected
residual = hidden_states
if self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, atb=atb)
hidden_states = self.call_mlp(hidden_states, in_weight, out_weight, in_bias, out_bias,
self.activation_fn, self.activation_dropout, self.training,
self.fused, self.fused_function)
# hidden_states = fused_dropout_add(hidden_states, residual, self.dropout, self.training)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
# ADAPTER
if self.has_adapter:
residual = hidden_states
if self.adapter_location == 1:
hidden_states = self.adapter(hidden_states, lang=lang, atb=atb)
hidden_states.add_(residual)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if contrastive_loss is not None:
# print("Return contrastive loss here,", contrastive_loss.size())
outputs += (contrastive_loss, )
return outputs, incremental_cache
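# --- Illustrative sketch (not part of the original code) ---------------------
# The decoder layer above wires every sublayer (self-attention, interleaved
# FFN, cross-attention, final FFN) in either pre-norm or post-norm order,
# controlled by `normalize_before`. A minimal stand-alone illustration of the
# two orderings, with a Linear standing in for the sublayer:
def _demo_residual_norm_orderings():
    import torch
    from torch import nn
    dim = 8
    norm = nn.LayerNorm(dim)
    sublayer = nn.Linear(dim, dim) # stand-in for attention or FFN
    x = torch.randn(4, dim)
    pre_norm = x + sublayer(norm(x)) # normalize_before=True: LN, sublayer, add
    post_norm = norm(x + sublayer(x)) # normalize_before=False: sublayer, add, LN
    return pre_norm, post_norm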
class MBartPreTrainedModel(PreTrainedModel):
config_class = DeltaLMConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (DeltaLMEncoder, DeltaLMDecoder)):
module.gradient_checkpointing = value
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
class DeltaLMEncoder(MBartPreTrainedModel):
"""
    Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a
    :class:`DeltaLMEncoderLayer`.
Args:
config: DeltaLMConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: DeltaLMConfig, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.layerdrop = opt.death_rate
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.opt = opt
self.word_dropout = opt.word_dropout
embed_dim = config.d_model
self.embed_dim = embed_dim
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
# TODO: check this number
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([DeltaLMEncoderLayer(config) for _ in range(config.encoder_layers)])
# this applies at the beginning of the encoder stack
if config.normalize_embedding:
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = nn.Identity()
# this applies at the end of the encoder stack
if config.normalize_before:
self.layer_norm = LayerNorm(config.d_model)
else:
self.layer_norm = nn.Identity()
self.init_weights()
self.gradient_checkpointing = False
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None
):
"""
:param input_ids: [T x B] discrete input tokens
:param attention_mask: [B x T] attention mask (padded = 1, non-pad = 0]
:param inputs_embeds: [T x B x H] optional
:param output_attentions:
:param output_hidden_states:
:param return_dict:
:return:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# print(self.embed_scale, self.layernorm_embedding, self.layer_norm)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            bsz, seq_len = input_ids.size(0), input_ids.size(1)
            input_shape = torch.Size([bsz, seq_len])
        elif inputs_embeds is not None:
            bsz, seq_len = inputs_embeds.size(0), inputs_embeds.size(1)
            input_shape = torch.Size([bsz, seq_len])
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
inputs_embeds = inputs_embeds.view(bsz, seq_len, -1)
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
        # TODO: enable the fast bert mha path. For now it is disabled, so every
        # layer takes the padded [T x B x H] route and receives
        # max_len/cu_seqlens as None.
        can_run_fast_bert_mha = False
        max_len, cu_seqlens = None, None
        hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
max_len=max_len, cu_seqlens=cu_seqlens,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
# if we remove padding before (for fast bert MHA) then remember to put padding back
# to restore the form B x T X H
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
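# --- Illustrative sketch (not part of the original code) ---------------------
# The embedding stage of DeltaLMEncoder.forward in isolation: token embedding,
# optional sqrt(d_model) scaling, learned positions, embedding LayerNorm,
# dropout, then the transpose to the time-first [T x B x H] layout the layers
# expect. All sizes below are made up for the example.
def _demo_encoder_embedding_stage():
    import math
    import torch
    from torch import nn
    vocab, d_model, bsz, seq_len = 100, 16, 3, 7
    embed_tokens = nn.Embedding(vocab, d_model, padding_idx=1)
    embed_positions = nn.Embedding(64, d_model) # stand-in for the learned positions
    layernorm_embedding = nn.LayerNorm(d_model)
    input_ids = torch.randint(2, vocab, (bsz, seq_len))
    x = embed_tokens(input_ids) * math.sqrt(d_model) # scale_embedding=True case
    x = x + embed_positions(torch.arange(seq_len)).unsqueeze(0)
    x = layernorm_embedding(x)
    x = nn.functional.dropout(x, p=0.1, training=True)
    return x.transpose(0, 1).contiguous() # [T x B x H]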
class DeltaLMDecoder(MBartPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`MBartDecoderLayer`
\
Args:
config: DeltaLMConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: DeltaLMConfig, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
self.dropout = config.dropout
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([DeltaLMDecoderLayer(config) for _ in range(config.decoder_layers)])
# applies before the decoder stack
if config.normalize_embedding:
self.layernorm_embedding = LayerNorm(config.d_model)
else:
self.layernorm_embedding = nn.Identity()
# applies after the decoder stack
if config.normalize_before:
self.layer_norm = LayerNorm(config.d_model)
else:
self.layer_norm = nn.Identity()
self.init_weights()
self.gradient_checkpointing = False
self.model_size = config.d_model
self.switchout = 0.0
# self.word_lut = self.embed_tokens
self.config.bert_hidden_size = config.d_model
self.layerdrop = opt.death_rate_decoder
self.dec_pretrained_model = 'mbart'
if opt.freeze_embedding:
self.embed_tokens.weight.requires_grad = False
self.word_dropout = opt.word_dropout
# freeze parameters if declared
if opt.freeze_decoder_self_attn:
self.freeze_self_attn_params()
if opt.freeze_decoder_ffn:
self.freeze_ffn_params()
if opt.freeze_decoder:
for p in self.parameters():
p.requires_grad = False
if not opt.freeze_cross_attention:
# but we need to enable the cross attention
for layer in self.layers:
for p in layer.encoder_attn.parameters():
p.requires_grad = True
for p in layer.encoder_attn_layer_norm.parameters():
p.requires_grad = True
if opt.multilingual_factorized_weights_decoder:
print("[INFO] Factorizing MBART model into %d languages" % opt.n_languages)
self.add_factorize(opt.n_languages, rank=opt.mfw_rank,
multiplicative=opt.mfw_multiplicative,
fast=opt.fast_factorize)
# adapter
if opt.decoder_adapter > 0:
print("[INFO] Adding MBART Adapters for %d languages" % opt.n_languages)
for layer in self.layers:
layer.add_adapters(opt.n_languages, adapter_location=opt.decoder_adapter)
def freeze_self_attn_params(self):
self.layer_norm.weight.requires_grad = False
self.layer_norm.bias.requires_grad = False
for layer in self.layers:
layer.freeze_self_attn_params()
def freeze_ffn_params(self):
for layer in self.layers:
layer.freeze_ffn_params()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False):
for layer in self.layers:
layer.add_factorize(n_languages, rank=rank, multiplicative=multiplicative, fast=fast)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
sub_encoder_hidden_states=None,
sub_encoder_attention_mask=None,
inputs_embeds=None,
incremental=False, incremental_cache=None,
lang=None, atb=None,
output_attentions=None,
output_hidden_states=None,
checkpointing_ffn=False,
checkpointing_cross_attn=False,
):
"""
:param atb:
:param input_ids:
:param attention_mask:
:param encoder_hidden_states:
:param encoder_attention_mask:
:param sub_encoder_hidden_states:
:param sub_encoder_attention_mask:
:param inputs_embeds:
:param incremental:
:param incremental_cache:
:param lang:
:param output_attentions:
:param output_hidden_states:
:return:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = 0
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
        qlen = input_shape[-1]
        klen = qlen
# if attention_mask is None:
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
        # the encoder attention mask is passed through as [bsz, seq_len];
        # expansion to [bsz, 1, tgt_seq_len, src_seq_len] is left to the
        # attention implementation
        # encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
# hidden_states = hidden_states
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
# next_decoder_cache = () if use_cache else None
contrastive_loss = 0
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# Stochastic Layer
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_outputs, _ = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
sub_encoder_hidden_states=sub_encoder_hidden_states,
sub_encoder_attention_mask=sub_encoder_attention_mask,
output_attentions=output_attentions,
lang=lang,
atb=atb
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add up the contrastive_loss per layer
if sub_encoder_hidden_states is not None:
contrastive_loss_ = layer_outputs[-1]
# print("Receive contrastive loss after layer", contrastive_loss_.size())
contrastive_loss = contrastive_loss + contrastive_loss_
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, contrastive_loss]
if v is not None
)
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
atb = decoder_state.tgt_atb
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
input_ids = input
input_shape = input_ids.size()
time_step = input.size(1)
# print("[DEBUGGING] Current time step: %d" % time_step)
input_ = input
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1:]
past_key_values_length = input.size(1) - 1
else:
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_) * self.embed_scale
# inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
qlen = input_ids.size(1)
klen = qlen
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
if buffering:
attention_mask = attention_mask[-1:, :]
encoder_attention_mask = decoder_state.src_mask
if not self.layers[0].encoder_attn.fast_attention:
encoder_attention_mask = 1 - encoder_attention_mask
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_.size(-1))
else:
encoder_attention_mask = encoder_attention_mask.bool()
# embed positions
positions = self.embed_positions(input_.size(), past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = hidden_states.transpose(0, 1)
hidden_states = self.layernorm_embedding(hidden_states)
for idx, decoder_layer in enumerate(self.layers):
if buffering:
buffer = buffers[idx] if idx in buffers else None
else:
buffer = None
layer_outputs, buffer = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=None,
incremental=buffering, incremental_cache=buffer,
lang=lang, atb=atb
)
if buffering:
decoder_state.update_attention_buffer(buffer, idx)
hidden_states = layer_outputs[0]
hidden_states = self.layer_norm(hidden_states)
output = hidden_states[-1].unsqueeze(0)
# just a fake coverage
coverage = hidden_states.new(hidden_states.size(1), 1, encoder_hidden_states.size(0)).zero_()
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states
return output_dict
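# --- Illustrative sketch (not part of the original code) ---------------------
# In buffered decoding, step() builds the full causal mask for the current
# length but keeps only its last row, so the single new query position can
# attend to all previously decoded tokens:
def _demo_incremental_causal_mask(qlen=5):
    import torch
    full_mask = torch.triu(torch.ones(qlen, qlen), diagonal=1).bool()
    step_mask = full_mask[-1:, :] # [1 x qlen]
    assert not step_mask.any() # the newest position is allowed to see everything
    return step_mask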
# File: NMTGMinor-master/pretrain_module/modeling_bert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import torch
import torch.utils.checkpoint
from torch import nn
import torch.nn.functional as F
import numpy as np
from .activations import gelu, gelu_new, swish
from .configuration_bert import BertConfig
from .modeling_outputs import (
BaseModelOutput,
)
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices
import onmt.constants
from collections import defaultdict
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def mish(x):
return x * torch.tanh(nn.functional.softplus(x))
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("FusedLayerNorm is not available, we use torch.nn.LayerNorm")
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.max_relative_pos_len = config.max_relative_pos_len
self.pos_emb_type = config.pos_emb_type
self.diff_head_pos = config.diff_head_pos
if self.pos_emb_type == 'absolute':
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
else:
self.position_embeddings = None
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.max_position_id = config.max_position_embeddings
self.bert_word_dropout = config.bert_word_dropout
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.bert_emb_dropout)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, no_emb_offset=False):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
if seq_length > self.max_position_id:
position_ids = torch.clamp(position_ids, 0, self.max_position_id-1)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
embed = self.word_embeddings
if self.bert_word_dropout and self.training:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - self.bert_word_dropout).\
expand_as(embed.weight) / (1 - self.bert_word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
words_embeddings = F.embedding(
input_ids, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def emb_step(self, tgt_len, input_ids, token_type_ids=None):
position_ids = torch.tensor(tgt_len-1, dtype=torch.long, device=input_ids.device)
if tgt_len > self.max_position_id:
position_ids = torch.tensor(self.max_position_id-1, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
embed = self.word_embeddings
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
words_embeddings = F.embedding(
input_ids, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
return embeddings
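# --- Illustrative sketch (not part of the original code) ---------------------
# The word dropout in BertEmbeddings.forward drops entire rows of the
# embedding matrix (whole vocabulary types) and rescales the surviving rows
# by 1/(1-p), so the expected embedding is unchanged. Stand-alone version:
def _demo_bert_word_dropout(p=0.1):
    import torch
    from torch import nn
    import torch.nn.functional as F
    embed = nn.Embedding(10, 4, padding_idx=0)
    keep = embed.weight.new_empty(embed.weight.size(0), 1).bernoulli_(1 - p)
    masked_weight = embed.weight * keep / (1 - p) # zero some types, rescale the rest
    input_ids = torch.tensor([[1, 2, 3]])
    return F.embedding(input_ids, masked_weight, embed.padding_idx)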
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.heads_num = config.num_attention_heads
self.head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.heads_num * self.head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.bert_atten_dropout)
# for relative attention
self.max_relative_pos_len = config.max_relative_pos_len
self.pos_emb_type = config.pos_emb_type
self.diff_head_pos = config.diff_head_pos
if self.pos_emb_type == "absolute":
self.relative_pos_emb = None
elif self.pos_emb_type == "relative_k":
if self.diff_head_pos:
self.relative_pos_embeddingk = nn.Embedding(2 * self.max_relative_pos_len + 1, self.all_head_size)
else:
self.relative_pos_embeddingk = nn.Embedding(2 * self.max_relative_pos_len + 1, self.head_size)
elif self.pos_emb_type == "relative_kv":
# diff_head_pos brings no improvement
assert not self.diff_head_pos
if self.diff_head_pos:
self.relative_pos_embeddingk = nn.Embedding(2 * self.max_relative_pos_len + 1, self.all_head_size)
self.relative_pos_embeddingv = nn.Embedding(2 * self.max_relative_pos_len + 1, self.all_head_size)
else:
self.relative_pos_embeddingk = nn.Embedding(2 * self.max_relative_pos_len + 1, self.head_size)
self.relative_pos_embeddingv = nn.Embedding(2 * self.max_relative_pos_len + 1, self.head_size)
else:
print("The pos_emb_type is not supported")
exit(-1)
def transpose_for_scores(self, x):
        # x: [bsz, len, all_head_size]
        # new_x_shape: [bsz, len, h_num, h_dim]
        # return: [bsz, h_num, len, h_dim]
new_x_shape = x.size()[:-1] + (self.heads_num, self.head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
# hidden_states: [bsz, len, H]
mixed_query_layer = self.query(hidden_states) # [bsz, len, all_head_size]
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
# hidden_states [bsz, len_q, H]
# mixed_key_layer: [bsz, len_k, all_head_size] with all_head_size = H
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer) # [bsz, h_num, q, h_dim]
key_layer = self.transpose_for_scores(mixed_key_layer) # [bsz, h_num, k, h_dim]
value_layer = self.transpose_for_scores(mixed_value_layer) # [bsz, h_num, len_v, h_dim]
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores_qk = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # [bsz h_num q k]
attention_scores = attention_scores_qk / math.sqrt(self.head_size)
if self.pos_emb_type == "relative_k" or self.pos_emb_type == "relative_kv":
klen = mixed_key_layer.shape[1]
vlen = klen
qlen = mixed_query_layer.shape[1]
bsz = hidden_states.shape[0]
range_vec_q = torch.arange(qlen, device=hidden_states.device)
range_mat_q = range_vec_q.unsqueeze(-1).expand(-1, klen)
range_vec_k = torch.arange(klen, device=hidden_states.device)
distance_mat = range_mat_q - range_vec_k
distance_mat_clamp = distance_mat.clamp_(-self.max_relative_pos_len, self.max_relative_pos_len)
            relative_position = distance_mat_clamp.add_(self.max_relative_pos_len)  # [qlen x klen]
relative_pos_embk = self.relative_pos_embeddingk(relative_position) # [q ,k,h_dim/all_h_dim]
relative_pos_embk = relative_pos_embk.to(dtype=query_layer.dtype) # fp16 compatibility
# einsum"bhld,lrd->bhlr"
if not self.diff_head_pos:
# query_layer [bsz, h_num, qlen, h_dim] => [bsz*h_num, q, h_dim]
query_layer_rel = query_layer.reshape(bsz * self.heads_num, qlen, self.head_size)
query_layer_rel = query_layer_rel.transpose(0, 1) # [q, bsz*h_num, h_dim]
                # [q, b*h_num, h_dim] bmm [q, h_dim, k] => [q, b*h_num, k]
attn_scores_rel_k = torch.bmm(query_layer_rel, relative_pos_embk.transpose(1, 2))
attn_scores_rel_k = attn_scores_rel_k.transpose(0, 1).reshape(bsz, self.heads_num, qlen, klen)
else:
query_layer_rel = query_layer.reshape(bsz, self.heads_num * qlen, self.head_size) # [b, h_num*q, h_dim]
query_layer_rel = query_layer_rel.transpose(0, 1) # [h_num*q, bsz, h_dim]
relative_pos_embk = relative_pos_embk.transpose(0, 1) # [k, q, all_h_dim]
# [k, q, h_num, h_dim] => [k, h_num, q, h_dim]
relative_pos_embk = relative_pos_embk.reshape(klen, qlen, self.heads_num, self.head_size).transpose(1, 2)
# [klen, h_num, qlen, h_dim] =>[k, h_num*q, h_dim] => [h_num*q, k, h_dim]
relative_pos_embk = relative_pos_embk.reshape(klen, self.heads_num * qlen, self.head_size).transpose(0, 1)
# [h_num*q, bsz, h_dim] [h_num*q, h_dim, k] => [h_num*q, bsz, k]
attn_scores_rel_k = torch.bmm(query_layer_rel, relative_pos_embk.transpose(1, 2))
attn_scores_rel_k = attn_scores_rel_k.transpose(0, 1).reshape(bsz, self.heads_num, qlen, klen)
attn_scores_rel_k = attn_scores_rel_k / math.sqrt(self.head_size)
attention_scores += attn_scores_rel_k # [b h_num q v]
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# [b, h_num, q,v] [b, h_num, v, h_dim] => [b, h_num, q ,dim]
context_layer = torch.matmul(attention_probs, value_layer)
if self.pos_emb_type == "relative_kv":
relative_pos_embv = self.relative_pos_embeddingv(relative_position) # [q , v, h_dim]
relative_pos_embv = relative_pos_embv.to(dtype=query_layer.dtype) # fp16 compatibility
# [b h_num q v] -> [q, b*h_num v]
attention_scores = attention_probs.reshape(bsz*self.heads_num, qlen, vlen).transpose(0, 1)
# [q, b*h_num v] [q , v, h_dim] -> [q, b*h_num h_dim] ->[b, h_num, q, h_dim]
context_rel_v = torch.matmul(attention_scores, relative_pos_embv).transpose(0, 1)
context_rel_v = context_rel_v.reshape(bsz, self.heads_num, qlen, self.head_size)
context_layer += context_rel_v
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
def selfattn_step(self,
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
buffer=None
):
        # hidden_size -> all_head_size (e.g. 768 -> 768 for bert-base)
proj_query = self.query(hidden_states) # [beam*bsz, 1(always), H]
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
# enc_dec_attention
if encoder_hidden_states is not None: # use src mask, otherwise use tgt mask
attention_mask = encoder_attention_mask
if buffer is not None and 'src_k' in buffer and 'src_v' in buffer: # no repeated computation needed
proj_key = buffer['src_k']
proj_value = buffer['src_v']
else:
if buffer is None:
buffer = dict()
proj_key = self.key(encoder_hidden_states)
proj_value = self.value(encoder_hidden_states)
buffer['src_k'] = proj_key
buffer['src_v'] = proj_value
# decoder self-attention
# hidden_states [bsz*beam, 1(always), H], bsz will decrease if finished e.g. bsz*beam:128,124...
else:
proj_key = self.key(hidden_states)
proj_value = self.value(hidden_states)
if buffer is not None and 'k' in buffer and 'v' in buffer:
proj_key = torch.cat([buffer['k'], proj_key], dim=1) # concat with previous time_step result
buffer['k'] = proj_key
proj_value = torch.cat([buffer['v'], proj_value], dim=1) # time second
buffer['v'] = proj_value
else:
if buffer is None:
buffer = dict()
buffer['k'] = proj_key
buffer['v'] = proj_value
query_layer = self.transpose_for_scores(proj_query) # [beam*bsz, h_num, 1, head_size]
key_layer = self.transpose_for_scores(proj_key)
value_layer = self.transpose_for_scores(proj_value)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores_qk = torch.matmul(query_layer, key_layer.transpose(-1, -2))
# dec self-attention: [beam*bsz, h_num, 1(always), step]) bsz -->1 step 1-->qlen(klen)
# dec cross-attention [beam*bsz, h_num, 1(always), src_len(klen)(always)]) bsz -->1
attention_scores = attention_scores_qk / math.sqrt(self.head_size)
# relative attention
if self.pos_emb_type == "relative_k" or self.pos_emb_type == "relative_kv":
qlen = buffer['k'].shape[1] # always k, not src_k, so the current inference position in decoder
klen = proj_key.shape[1] # change based on self-attn(constant) or cross-attn(increase) accordingly
vlen = klen
bbsz = attention_scores_qk.shape[0]
range_vec_q = torch.arange(qlen, device=hidden_states.device)[-1].unsqueeze(-1)
range_mat_q = range_vec_q.unsqueeze(-1).expand(-1, klen) # [1, klen]
range_vec_k = torch.arange(klen, device=hidden_states.device) # [1, klen]
distance_mat = range_mat_q - range_vec_k
distance_mat_clamp = distance_mat.clamp_(-self.max_relative_pos_len, self.max_relative_pos_len)
relative_position = distance_mat_clamp.add_(self.max_relative_pos_len) # [1, k], q is always 1
relative_pos_embk = self.relative_pos_embeddingk(relative_position) # [1, k, h_dim/all_h_dim]
relative_pos_embk = relative_pos_embk.to(dtype=query_layer.dtype) # fp16 compatibility
if not self.diff_head_pos:
# query_layer [bbsz(beam*bsz), h_num, 1, h_dim]
query_layer_rel = query_layer.reshape(bbsz * self.heads_num, 1, self.head_size)
query_layer_rel = query_layer_rel.transpose(0, 1) # [1, bsz*h_num, h_dim]
# [1, bsz*h_num, h_dim] [1, h_dim, k] => [1, bsz*h_num, k]
attention_scores_rel = torch.bmm(query_layer_rel, relative_pos_embk.transpose(1, 2))
# [1, bsz*h_num, k] => [bbsz, h_num, 1, k]
attention_scores_rel = attention_scores_rel.transpose(0, 1).reshape(bbsz, self.heads_num, 1, klen)
else:
relative_pos_embk = relative_pos_embk.transpose(0, 1) # [ k, 1, all_h_dim]
relative_pos_embk = relative_pos_embk.reshape(klen, 1, self.heads_num, self.head_size).transpose(1, 2) # [k, h_num, 1, h_dim]
relative_pos_embk = relative_pos_embk.reshape(klen, self.heads_num * 1, self.head_size).transpose(0, 1) # [h_num* 1, klen, h_dim]
# query_layer [bbsz(beam*bsz), h_num, 1, h_dim]
query_layer_rel = query_layer.reshape(bbsz, self.heads_num * 1, self.head_size) # [bbsz, num*1, h_dim]
query_layer_rel = query_layer_rel.transpose(0, 1) # [h_num*1, bbsz, h_dim]
attention_scores_rel = torch.bmm(query_layer_rel, relative_pos_embk.transpose(1, 2)) # [num, bbsz, klen]
# [bsz, h_num, qlen, klen]
attention_scores_rel = attention_scores_rel.transpose(0, 1).reshape(bbsz, self.heads_num, 1, klen)
attention_scores_rel = attention_scores_rel / math.sqrt(self.head_size)
attention_scores += attention_scores_rel
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
# score: [bbsz, h_num, 1, v] # [bbsz, h_num, v, head_size] -> [bbsz, num, 1, h_dim]
context_layer = torch.matmul(attention_probs, value_layer)
if self.pos_emb_type == "relative_kv":
relative_pos_embv = self.relative_pos_embeddingv(relative_position) # [1 , v, h_dim]
relative_pos_embv = relative_pos_embv.to(dtype=query_layer.dtype) # fp16 compatibility
# [bbsz h_num 1 v] -> [1, b*h_num v]
attention_scores = attention_probs.reshape(bbsz*self.heads_num, 1, vlen).transpose(0, 1)
# [1, bb*h_num v] [1 , v, h_dim] -> [1, bb*h_num h_dim] ->[bb*h_num, 1, h_dim]
context_rel_v = torch.matmul(attention_scores, relative_pos_embv).transpose(0, 1)
context_rel_v = context_rel_v.reshape(bbsz, self.heads_num, 1, self.head_size)
context_layer += context_rel_v
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs, buffer
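# --- Illustrative sketches (not part of the original code) -------------------
# How the relative-position indices used in forward/selfattn_step are built:
# pairwise offsets q_i - k_j are clamped to [-max_len, max_len] and shifted
# into [0, 2*max_len], so they can index a table with 2*max_len + 1 rows:
def _demo_relative_position_indices(qlen=4, klen=4, max_len=2):
    import torch
    range_q = torch.arange(qlen).unsqueeze(-1) # [q x 1]
    range_k = torch.arange(klen) # [k]
    distance = range_q - range_k # [q x k]
    return distance.clamp(-max_len, max_len) + max_len # values in [0, 2*max_len]
# The buffer used by selfattn_step is a per-layer key/value cache: the
# self-attention entries ('k'/'v') grow by one position per decoding step,
# while cross-attention entries ('src_k'/'src_v') are computed once and
# reused. The growth pattern in plain torch:
def _demo_kv_cache(steps=3):
    import torch
    buffer, lengths = {}, []
    for _ in range(steps):
        new_k = torch.randn(2, 1, 8) # [bsz x 1 x H] projection of the newest token
        buffer['k'] = new_k if 'k' not in buffer else torch.cat([buffer['k'], new_k], dim=1)
        lengths.append(buffer['k'].size(1))
    return lengths # [1, 2, ..., steps]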
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.bert_hidden_dropout)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
def attn_step(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
buffer=None
):
self_outputs, buffer = self.self.selfattn_step(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
buffer
)
# output: BertSelfOutput dropout--> add--> LN
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs, buffer
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.bert_hidden_dropout)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
if self.is_decoder:
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
def bertlayer_step(
self,
hidden_states,
attention_mask,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
buffer=None
):
self_attention_outputs, buffer = self.attention.attn_step(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
buffer=buffer
)
attention_output = self_attention_outputs[0] # context_layer
outputs = self_attention_outputs[1:] # (attention_probs,)add self attentions if we output attention weights
cross_attention_outputs, buffer = self.crossattention.attn_step(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
buffer=buffer
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
# 1.dropout(intermediate_output) 2. add(attention_output) 3.LN
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs, buffer
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
base_model_prefix = "bert"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config,
bert_word_dropout=None,
bert_emb_dropout=None,
bert_atten_dropout=None,
bert_hidden_dropout=None,
bert_hidden_size=None,
is_decoder=False,
before_plm_output_ln=False,
gradient_checkpointing=False,
**kwargs
):
super().__init__(config)
self.config = config
if bert_word_dropout is not None:
self.config.bert_word_dropout = bert_word_dropout
if bert_emb_dropout is not None:
self.config.bert_emb_dropout = bert_emb_dropout
if bert_atten_dropout is not None:
self.config.bert_atten_dropout = bert_atten_dropout
if bert_hidden_dropout is not None:
self.config.bert_hidden_dropout = bert_hidden_dropout
if bert_hidden_size is not None:
self.config.bert_hidden_size = bert_hidden_size
self.config.max_relative_pos_len = kwargs.pop('max_pos_len', 0)
self.config.diff_head_pos = kwargs.pop('diff_head_pos', False)
self.config.pos_emb_type = kwargs.pop('pos_emb_type', "absolute")
self.config.is_decoder = is_decoder
self.config.before_plm_output_ln = before_plm_output_ln
self.config.gradient_checkpointing = gradient_checkpointing
self.embeddings = BertEmbeddings(self.config)
self.encoder = BertEncoder(self.config)
if self.config.before_plm_output_ln:
self.before_plm_output_ln = BertLayerNorm(self.config.hidden_size, eps=self.config.layer_norm_eps)
else:
self.before_plm_output_ln = None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
no_offset=False
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size() # [bsz, src_len]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # default [None] * n_head
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
no_emb_offset=no_offset,
) # [bsz, src_len, hidden_dim]
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.before_plm_output_ln is not None:
sequence_output = self.before_plm_output_ln(encoder_outputs[0])
else:
sequence_output = encoder_outputs[0]
return (sequence_output, ) + encoder_outputs[1:]
def step(self, input_ids, decoder_state, streaming=False):
device = input_ids.device
if input_ids.size(1) > 1:
input_ = input_ids[:, -1].unsqueeze(1)
else:
input_ = input_ids
tgt_token_type = input_.ne(onmt.constants.TGT_PAD).long() # [bsz, len]
data_type = next(self.parameters()).dtype
src_mask = decoder_state.src_mask.squeeze(1) # [bsz, all_src_len]
extended_src_mask = self.invert_attention_mask(src_mask)
mask_tgt = input_ids.ne(onmt.constants.TGT_PAD).byte()
input_shape = input_ids.size() # [bsz, sent_len]
cur_pos = input_shape[-1]
extended_tgt_mask = self.get_extended_attention_mask(mask_tgt, input_shape, device=device)
extended_tgt_mask = extended_tgt_mask[:, :, -1, :].unsqueeze(-2)
encoder_hidden_states = decoder_state.context.transpose(0, 1) # [b, l, de_model]
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
head_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, data_type)
if self.dec_pretrained_model == "bert" or self.dec_pretrained_model == "roberta":
embedding_output = self.embeddings.emb_step(cur_pos, input_, tgt_token_type)
else:
print("Warning: check dec_pretrained_model", self.dec_pretrained_model)
exit(-1)
hidden_states = embedding_output
output_attentions = False
buffers = decoder_state.attention_buffers
for i, layer in enumerate(self.encoder.layer):
buffer = buffers[i] if i in buffers else None
layer_outputs, buffer = layer.bertlayer_step(
hidden_states,
extended_tgt_mask,
head_mask[i],
encoder_hidden_states, # decoder_state.context
extended_src_mask, # decoder_state.src_mask
output_attentions,
buffer
)
hidden_states = layer_outputs[0]
decoder_state.update_attention_buffer(buffer, i)
output_dict = defaultdict(lambda: None)
output_dict["hidden"] = hidden_states
# output_dict["coverage"] = buffers[i]
return output_dict
def renew_buffer(self, new_len):
# not sure about this
# self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len+1, new_len+1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
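# --- Illustrative sketch (not part of the original code) ---------------------
# A minimal encoder-style forward pass through BertModel above. The dropout
# keyword arguments are required because BertEmbeddings/BertSelfAttention read
# them from the config; the tiny hyperparameter values are made up for the
# example.
def _demo_bert_model_forward():
    import torch
    config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                        num_attention_heads=2, intermediate_size=64)
    model = BertModel(config,
                      bert_word_dropout=0.0, bert_emb_dropout=0.1,
                      bert_atten_dropout=0.1, bert_hidden_dropout=0.1)
    input_ids = torch.randint(1, 100, (2, 6)) # [bsz x seq_len]
    attention_mask = torch.ones_like(input_ids)
    outputs = model(input_ids=input_ids, attention_mask=attention_mask,
                    return_dict=False)
    return outputs[0] # sequence output, [bsz x seq_len x hidden_size]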
# File: NMTGMinor-master/pretrain_module/adapter.py
import torch
import torch.nn.functional as F
import torch.nn as nn
from onmt.modules.layer_norm import LayerNorm
class Adapter(torch.nn.Module):
def __init__(self, input_dim, downsample_factor=2):
self.input_dim = input_dim
self.middle_dim = input_dim // downsample_factor
super(Adapter, self).__init__()
self.linear_in = nn.Linear(input_dim, self.middle_dim)
self.linear_out = nn.Linear(self.middle_dim, input_dim)
self.norm = LayerNorm(input_dim)
self.fused = False
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
self.reset_parameters()
def reset_parameters(self):
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
with torch.no_grad():
normal_(self.linear_in.weight.data)
normal_(self.linear_out.weight.data)
self.linear_in.bias.data.zero_()
self.linear_out.bias.data.zero_()
def forward(self, input):
if self.fused:
weights = [self.linear_in.weight, self.linear_out.weight]
biases = [self.linear_in.bias, self.linear_out.bias]
# seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
input_norm = self.norm(input)
input = self.fused_function(0.0, False, input_norm,
*weights, *biases)
return input
else:
return self.linear_out(F.relu(self.linear_in(self.norm(input))))
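# --- Illustrative sketch (not part of the original code) ---------------------
# Adapter is a pre-norm bottleneck (LN -> down-projection -> ReLU ->
# up-projection); the residual connection is added by the caller:
def _demo_adapter_usage():
    import torch
    adapter = Adapter(input_dim=16, downsample_factor=4) # bottleneck of 4 units
    x = torch.randn(5, 2, 16) # [T x B x H]
    return x + adapter(x) # residual applied outside the module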
class MultilingualAdapter(torch.nn.Module):
def __init__(self, n_languages, input_size, downsample_factor=4):
self.n_languages = n_languages
self.input_size = input_size
super(MultilingualAdapter, self).__init__()
self.adapters = nn.ModuleList([Adapter(input_size, downsample_factor) for _ in range(self.n_languages)])
def forward(self, input, lang=None, mixture=None):
"""
:param input: tensor TxBxH
:param lang: tensor size 1 (language for the batch)
:param mixture: tensor size B x n_language (mixture for the minibatch)
:return:
"""
if lang is not None:
assert mixture is None
            if lang.numel() != 1:
                print("Expected a single-element tensor, but got", lang.size())
            assert lang.numel() == 1
adapter = self.adapters[lang.item()]
return adapter(input)
if mixture is not None:
assert mixture.size(0) == input.size(1) and mixture.size(1) == self.n_languages
outputs = list()
for i in range(self.n_languages):
# mixture size is [B x n_language]
                mixture_weight = mixture[:, i].unsqueeze(0).unsqueeze(-1)  # [1 x B x 1]
                outputs.append(self.adapters[i](input) * mixture_weight)
            outputs = torch.stack(outputs)  # n_languages x T x B x H
            outputs = torch.sum(outputs, 0, keepdim=False)  # -> T x B x H
return outputs
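# --- Illustrative sketch (not part of the original code) ---------------------
# The two calling conventions of MultilingualAdapter: a single language id
# shared by the whole batch, or a per-sentence mixture over all adapters:
def _demo_multilingual_adapter():
    import torch
    m = MultilingualAdapter(n_languages=3, input_size=16)
    x = torch.randn(5, 2, 16) # [T x B x H]
    by_lang = m(x, lang=torch.tensor([1])) # adapter #1 for every sentence
    mixture = torch.softmax(torch.randn(2, 3), dim=-1) # [B x n_languages]
    blended = m(x, mixture=mixture)
    return by_lang.shape, blended.shape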
# File: NMTGMinor-master/pretrain_module/configuration_deltalm.py
# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MBART model configuration """
import warnings
from collections import OrderedDict
from typing import Mapping
from .configuration_utils import PretrainedConfig
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/config.json",
# See all MBART models at https://huggingface.co/models?filter=mbart
}
class DeltaLMConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.MBartModel`. It is used to
instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MBART `facebook/mbart-large-cc25
<https://huggingface.co/facebook/mbart-large-cc25>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Example::
>>> from transformers import MBartModel, MBartConfig
>>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
>>> configuration = MBartConfig()
>>> # Initializing a model from the facebook/mbart-large-cc25 style configuration
>>> model = MBartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "deltalm"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
forced_eos_token_id=2,
**kwargs
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
# File: NMTGMinor-master/pretrain_module/roberta_tokenization_ch.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
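
# Illustrative usage (added; the checkpoint path below is hypothetical):
# passing `do_lower_case=True` together with a cased checkpoint raises.
#
#     validate_case_matches_checkpoint(
#         do_lower_case=True,
#         init_checkpoint="cased_L-12_H-768_A-12/bert_model.ckpt")  # ValueError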
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as vocab_f:
for line in vocab_f:
token = convert_to_unicode(line)
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    output = []
    for item in items:
        output.append(vocab[item])
    return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
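
if __name__ == "__main__":
    # End-to-end sketch (added for illustration): build a tiny vocabulary and
    # run BasicTokenizer + WordpieceTokenizer via FullTokenizer. The vocab
    # entries are made up for this demo.
    import os
    import tempfile
    demo_vocab = ["[UNK]", "un", "##aff", "##able", "世", "界"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False,
                                     encoding="utf-8") as f:
        f.write("\n".join(demo_vocab) + "\n")
        vocab_path = f.name
    tokenizer = FullTokenizer(vocab_path, do_lower_case=True)
    # CJK characters are split per character by BasicTokenizer and looked up
    # directly; "Unaffable" is lowercased, then greedily split into wordpieces.
    print(tokenizer.tokenize("Unaffable 世界"))
    # -> ['un', '##aff', '##able', '世', '界']
    os.remove(vocab_path)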
# ---------------------------------------------------------------------------
# NMTGMinor / NMTGMinor-master/pretrain_module/modeling_mbart.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MBART model. """
import copy
import math
import random
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter, CrossEntropyLoss, MSELoss
import numpy as np
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.self_attention_func import self_attn_func, self_attn_compact_func
from onmt.modules.optimized.encdec_attention_func_bias import encdec_attn_bias_func, encdec_attn_bias_compact_func
from onmt.modules.optimized.linear import factorize_linear
from onmt.modules.dropout import embedded_dropout
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import linear_function
from torch.cuda.amp import custom_fwd, custom_bwd
from onmt.models.speech_recognizer.fairseq_wav2vec2.fairseq_modules import index_copy
from .activations import ACT2FN
from .modeling_outputs import (
BaseModelOutput,
)
from .modeling_utils import PreTrainedModel
# from ...utils import logging
# from .configuration_bart import BartConfig
import onmt
from collections import defaultdict
from .configuration_mbart import MBartConfig
_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
_CONFIG_FOR_DOC = "MBartConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"
MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/mbart-large-cc25",
# See all MBART models at https://huggingface.co/models?filter=mbart
]
class IndexCopy(torch.autograd.Function):
"""
This function is kinda similar to rnn pad_packed_sequence
It remaps nonpadded values for a (N-1)-d tensor into a (N)-d tensor
"""
@staticmethod
@custom_fwd
def forward(ctx, input, non_pad_indices, total_batch_size):
"""
:param ctx:
:param input: 2D [bsz x ... ] bsz is the total number of elements after unpadding
:param non_pad_indices: bsz * seq_len
:param total_batch_size: (int) bsz * seq_len (before unpadding) > bsz
:return:
In the forward pass we create a new zero tensor and copy the inputs into it based on non_pad_indices
"""
sizes = list(input.size())
sizes[0] = total_batch_size
output = input.new_zeros(*sizes)
output.index_copy_(0, non_pad_indices, input)
ctx.save_for_backward(non_pad_indices)
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grads):
"""
:param ctx:
:param output_grads:
:return:
In the backward pass we simply
"""
non_pad_indices, = ctx.saved_tensors
grad_input = output_grads.index_select(0, non_pad_indices)
return grad_input, None, None
# Note: this local autograd-based definition shadows the `index_copy` imported
# from fairseq_modules above.
index_copy = IndexCopy.apply
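
# Usage sketch (added for illustration; names below are local to this example):
# round-trip between a padded batch flattened to (bsz * seq_len) x hidden and
# its unpadded (packed) form.
#
#     x_padded = torch.randn(6, 4)               # (bsz * seq_len = 6) x hidden
#     non_pad_indices = torch.tensor([0, 1, 3])  # positions of real tokens
#     x_packed = x_padded.index_select(0, non_pad_indices)
#     x_restored = index_copy(x_packed, non_pad_indices, 6)
#     # rows 0, 1, 3 equal those of x_padded; rows 2, 4, 5 are zeros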
# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart
class MBartLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset)
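
# Behavior sketch (added for illustration): the table is allocated with two
# extra rows and every lookup is shifted by `offset`, mirroring the fairseq
# convention MBart was trained with.
#
#     emb = MBartLearnedPositionalEmbedding(num_embeddings=1024, embedding_dim=16)
#     emb.weight.shape          # torch.Size([1026, 16])
#     emb(torch.Size([2, 5]))   # positions 0..4 are read from rows 2..6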
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart
class MBartAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.fast_attention = False
self.is_factorized = False
self.multiplicative_factorize = False
self.fast_factorize = False
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def add_factorized_weights(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
"""
Add factorized weights for self-attention
:param n_languages:
:param rank:
:param multiplicative:
:param fast:
:param dyrank:
:return:
"""
embed_dim = self.embed_dim
self.is_factorized = True
self.multiplicative_factorize = multiplicative
self.fast_factorize = fast
self.dyrank = dyrank
if multiplicative:
_rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, 3 * embed_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
constant = 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, 3 * embed_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if self.dyrank:
nn.init.zeros_(self.r_i)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.zeros_(self.r_o)
nn.init.normal_(self.s_o, 0.0, 0.02)
else:
std = 0.01 if fast else 0.02
nn.init.normal_(self.r_i, 0.0, std)
nn.init.normal_(self.s_i, 0.0, std)
nn.init.normal_(self.r_o, 0.0, std)
nn.init.normal_(self.s_o, 0.0, std)
def convert_fast_attention(self):
if self.fast_attention:
return
# print("[INFO] Convert MBartAttention from slow to fast")
self.fast_attention = True
w_q = self.q_proj.weight.clone()
w_k = self.k_proj.weight.clone()
w_v = self.v_proj.weight.clone()
weights = [w_q, w_k, w_v]
weight_ = torch.cat(weights, dim=0).contiguous()
b_q = self.q_proj.bias.clone()
b_k = self.k_proj.bias.clone()
b_v = self.v_proj.bias.clone()
biases = [b_q, b_k, b_v]
bias_ = torch.cat(biases, dim=0).contiguous()
head_dim = self.head_dim
heads = self.num_heads
input_dim = self.embed_dim
weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1). \
reshape(-1, input_dim)
bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
weight_t = torch.Tensor(3 * input_dim, input_dim)
bias_t = torch.Tensor(3 * input_dim)
weight_t.copy_(weight_)
bias_t.copy_(bias_)
self.proj_weight = Parameter(weight_t)
self.proj_bias = Parameter(bias_t)
self.proj_weight.requires_grad = self.q_proj.weight.requires_grad
self.proj_bias.requires_grad = self.q_proj.bias.requires_grad
del self.q_proj, self.k_proj, self.v_proj
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cu_seqlens=None, max_len=None,
lang=None, atb=None,
incremental=False, incremental_cache=None,
checkpointing=False, stacked_kv=None
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
if not self.fast_attention:
raise NotImplementedError("Slow attention by HuggingFace is deprecated.")
else:
in_proj_weight = self.proj_weight
out_proj_weight = self.out_proj.weight
if self.is_factorized and self.fast_factorize:
n_languages, _rank = self.rm_o.size(0), self.rm_o.size(1)
# TODO: mm instead of index select
if lang.ndim == 1:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
lang.size(0), _rank,
self.rm_i.size(-1))
sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
lang.size(0), _rank,
self.sm_i.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
_len, _bsz, _rank, self.rm_i.size(-1))
sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
_len, _bsz, _rank, self.sm_i.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
if hidden_states.ndim == 3:
use_time_mask = self.is_decoder
bsz, qlen = hidden_states.size(1), hidden_states.size(0)
mask = attention_mask
low_precision = True # Use CUDA impl
input_lin_results = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
attn_output, coverage = self_attn_compact_func(use_time_mask, self.training, self.num_heads,
input_lin_results,
mask, self.dropout,
False, None,
incremental, incremental_cache, low_precision,
True, checkpointing)
attn_output = attn_output.view(qlen, bsz, -1).contiguous()
output = factorize_linear(attn_output, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage, incremental_cache
else:
"""
flash attention
"""
assert self.fast_bert_mha is not None
assert cu_seqlens is not None
assert max_len is not None
total_bsz = hidden_states.size(0)
# qkv = linear_function(hidden_states, in_proj_weight, self.proj_bias) # B x H
qkv = factorize_linear(hidden_states, in_proj_weight, self.proj_bias, rm_i, sm_i)
# B x 3 x H x d
# TODO: moving to CUDA to remove overhead?
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
dropout_p = self.dropout if self.training else 0.0
causal = self.is_decoder
                softmax_scale = 1.0 / math.sqrt(self.head_dim)
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
output = factorize_linear(context, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage, incremental_cache
            # TODO: merge this path with the fast-factorize path above; the two are largely duplicated
if self.is_factorized:
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if not self.dyrank:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
else:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
# Has to be multiplicative here
in_proj_weight = in_proj_weight * mul_factor_in
out_proj_weight = out_proj_weight * mul_factor_out
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.dyrank:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight + add_factor_in
out_proj_weight = out_proj_weight + add_factor_out
if hidden_states.ndim == 3:
use_time_mask = self.is_decoder
qlen, klen = hidden_states.size(0), hidden_states.size(0)
mask = attention_mask
low_precision = True # Use CUDA impl
# print("USING FAST ATTENTION - DECODER=" + str(self.is_decoder))
attn_output, coverage = self_attn_func(use_time_mask, self.training, self.num_heads, hidden_states,
in_proj_weight, out_proj_weight,
self.proj_bias, self.out_proj.bias,
mask, self.dropout,
False, None,
incremental, incremental_cache, low_precision,
True, checkpointing)
else:
"""
flash attention
"""
assert self.fast_bert_mha is not None
assert cu_seqlens is not None
assert max_len is not None
# assert self.is_decoder is False # only encoder
# sm = torch.cuda.get_device_capability()
# Only Ampere supported at the moment-
total_bsz = hidden_states.size(0)
qkv = linear_function(hidden_states, in_proj_weight, self.proj_bias) # B x H
# B x 3 x H x d
# TODO: moving to CUDA to remove overhead?
# qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
# context, coverage = self.fast_bert_mha(qkv, cu_seqlens, self.dropout, max_len, self.training)
qkv = qkv.view(total_bsz, self.num_heads, 3, self.head_dim).transpose(1, 2).contiguous()
dropout_p = self.dropout if self.training else 0.0
causal = self.is_decoder
                softmax_scale = 1.0 / math.sqrt(self.head_dim)
context = self.fast_bert_mha(qkv, cu_seqlens, max_len, dropout_p, softmax_scale, causal, False)
coverage = None
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
outputs = linear_function(context, out_proj_weight, self.out_proj.bias)
attn_output = outputs
return attn_output, coverage, incremental_cache
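
# Math sketch (added for illustration): the factorized branch above adjusts the
# shared weight W per language with a rank-k multiplicative term and a rank-k
# additive term,
#
#     W' = W * (sum_k rm_k sm_k^T) + sum_k r_k s_k^T
#
# which can be reproduced with plain torch (names local to this example):
#
#     W = torch.randn(8, 8)
#     rm, sm = torch.ones(2, 8), torch.ones(2, 8)                 # multiplicative
#     r, s = 0.02 * torch.randn(2, 8), 0.02 * torch.randn(2, 8)   # additive
#     mul = torch.bmm(rm.unsqueeze(-1), sm.unsqueeze(1)).sum(dim=0)
#     add = torch.bmm(r.unsqueeze(-1), s.unsqueeze(1)).sum(dim=0)
#     W_lang = W * mul + add    # same update as the non-fast path above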
class MBartCrossAttention(MBartAttention):
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
convert_fast_attention = False,
**kwargs
):
super().__init__(embed_dim, num_heads, dropout, is_decoder, bias)
from onmt.modules.optimized.flash_mha import flash_encdec_mha
self.fast_bert_mha = flash_encdec_mha
if convert_fast_attention:
self.convert_fast_attention()
def convert_fast_attention(self):
if self.fast_attention:
return
self.fast_attention = True
# Merge weight KV into one
w_k = self.k_proj.weight.clone()
w_v = self.v_proj.weight.clone()
weights = [w_k, w_v]
weight_ = torch.cat(weights, dim=0).contiguous()
b_k = self.k_proj.bias.clone()
b_v = self.v_proj.bias.clone()
biases = [b_k, b_v]
bias_ = torch.cat(biases, dim=0).contiguous()
head_dim = self.head_dim
heads = self.num_heads
input_dim = self.embed_dim
weight_ = weight_.reshape(2 * head_dim * heads, input_dim).view(2, heads, head_dim, input_dim).transpose(0, 1). \
reshape(-1, input_dim)
bias_ = bias_.reshape(2 * head_dim * heads).view(2, heads, head_dim).transpose(0, 1).reshape(-1)
weight_t = torch.Tensor(2 * input_dim, input_dim)
bias_t = torch.Tensor(2 * input_dim)
weight_t.copy_(weight_)
bias_t.copy_(bias_)
self.proj_weight_kv = Parameter(weight_t)
self.proj_bias_kv = Parameter(bias_t)
self.proj_weight_kv.requires_grad = self.k_proj.weight.requires_grad
self.proj_bias_kv.requires_grad = self.k_proj.bias.requires_grad
del self.k_proj
del self.v_proj
def add_factorized_weights(self, n_languages, rank=4,
multiplicative=False, fast=False, dyrank=False, **kwargs):
embed_dim = self.embed_dim
self.is_factorized = True
self.multiplicative_factorize = multiplicative
self.fast_factorize = fast
self.dyrank = dyrank
# if not fast: the weights are calculated first
# W = W_S * (rm \dot sm) + (r \dot s)
# if fast: maybe using only W_S
# WX + b = W(rm \dot sm)X + b
# = W(X \dot sm)rm + b
if multiplicative:
_rank = rank if fast else 1
self.rm_q = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.sm_q = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.rm_kv = torch.nn.Parameter(torch.Tensor(n_languages, _rank, 2 * embed_dim))
self.sm_kv = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, embed_dim))
constant = 1
nn.init.constant_(self.rm_q, constant)
nn.init.constant_(self.sm_q, constant)
nn.init.constant_(self.rm_kv, constant)
nn.init.constant_(self.sm_kv, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
if not fast:
self.r_q = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_q = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_kv = torch.nn.Parameter(torch.Tensor(n_languages, rank, 2 * embed_dim))
self.s_kv = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if self.dyrank:
nn.init.zeros_(self.r_q)
nn.init.normal_(self.s_q, 0.0, 0.02)
nn.init.zeros_(self.r_kv)
nn.init.normal_(self.s_kv, 0.0, 0.02)
nn.init.zeros_(self.r_o)
nn.init.normal_(self.s_o, 0.0, 0.02)
else:
std = 0.01 if fast else 0.02
nn.init.normal_(self.r_q, 0.0, std)
nn.init.normal_(self.s_q, 0.0, std)
nn.init.normal_(self.r_kv, 0.0, std)
nn.init.normal_(self.s_kv, 0.0, std)
nn.init.normal_(self.r_o, 0.0, std)
nn.init.normal_(self.s_o, 0.0, std)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
lang=None, checkpointing=False,
src_lang=None,
incremental=False, incremental_cache=None,
cu_seqlens=None, max_len=None,
cu_seqlens_kv=None, max_len_kv=None, stacked_kv=None, **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
assert key_value_states is not None
if not self.fast_attention:
raise NotImplementedError("Slow Attention by HuggingFace not supported anymore")
else:
in_proj_weight_q = self.q_proj.weight
in_proj_weight_kv = self.proj_weight_kv
out_proj_weight = self.out_proj.weight
if self.is_factorized and self.fast_factorize:
# TODO: mm instead of index select
n_languages, _rank = self.rm_o.size(0), self.rm_o.size(1)
# if lang has only 1 element we can do this
if lang.ndim == 1:
rm_q = torch.index_select(self.rm_q, 0, lang).squeeze(0) # squeeze possible because only 1
sm_q = torch.index_select(self.sm_q, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_q = torch.mm(lang, self.rm_q.view(n_languages, _rank * self.rm_q.size(-1))).view(
lang.size(0), _rank,
self.rm_q.size(-1))
sm_q = torch.mm(lang, self.sm_q.view(n_languages, _rank * self.sm_q.size(-1))).view(
lang.size(0), _rank,
self.sm_q.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_q = torch.mm(_lang, self.rm_q.view(n_languages, _rank * self.rm_q.size(-1))).view(
_len, _bsz, _rank, self.rm_q.size(-1))
sm_q = torch.mm(_lang, self.sm_q.view(n_languages, _rank * self.sm_q.size(-1))).view(
_len, _bsz, _rank, self.sm_q.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
else:
raise NotImplementedError("Unknown dimension for language IDs")
if src_lang.ndim == 1:
rm_kv = torch.index_select(self.rm_kv, 0, src_lang).squeeze(0) # squeeze possible because only 1
sm_kv = torch.index_select(self.sm_kv, 0, src_lang).squeeze(0)
elif src_lang.ndim == 2:
rm_kv = torch.mm(src_lang, self.rm_kv.view(n_languages, _rank * self.rm_kv.size(-1))).view(
src_lang.size(0), _rank,
self.rm_kv.size(-1))
sm_kv = torch.mm(src_lang, self.sm_kv.view(n_languages, _rank * self.sm_kv.size(-1))).view(
src_lang.size(0), _rank,
self.sm_kv.size(-1))
                elif src_lang.ndim == 3:
                    _len_src, _bsz = src_lang.size(0), src_lang.size(1)
                    _src_lang = src_lang.view(_len_src * _bsz, src_lang.size(-1))
rm_kv = torch.mm(_src_lang, self.rm_kv.view(n_languages, _rank * self.rm_kv.size(-1))).view(
_len_src, _bsz, _rank, self.rm_kv.size(-1))
sm_kv = torch.mm(_src_lang, self.sm_kv.view(n_languages, _rank * self.sm_kv.size(-1))).view(
_len_src, _bsz, _rank, self.sm_kv.size(-1))
# if lang has size [T x B x L] we need to do a GEMM
if hidden_states.ndim == 3:
use_time_mask = self.is_decoder
bsz, qlen = hidden_states.size(1), hidden_states.size(0)
mask = attention_mask
low_precision = True # Use CUDA impl
input_lin_q_results = factorize_linear(hidden_states, in_proj_weight_q, self.q_proj.bias, rm_q, sm_q)
input_lin_kv_results = factorize_linear(key_value_states, in_proj_weight_kv, self.proj_bias_kv, rm_kv, sm_kv)
recompute = False
attn_output, coverage = encdec_attn_bias_compact_func(recompute, self.training, self.num_heads,
input_lin_q_results , input_lin_kv_results ,
attention_mask, self.dropout,
incremental, incremental_cache,
False, None, None, # no rotary encodings
low_precision, True)
attn_output = attn_output.view(qlen, bsz, -1).contiguous()
output = factorize_linear(attn_output, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
return output, coverage, incremental_cache
else:
"""
flash attention
"""
assert self.fast_bert_mha is not None
assert cu_seqlens is not None
assert cu_seqlens_kv is not None
assert max_len is not None
assert max_len_kv is not None
assert incremental == False
assert incremental_cache is None
total_bsz_q = hidden_states.size(0)
total_bsz_kv = key_value_states.size(0)
q = factorize_linear(hidden_states, in_proj_weight_q, self.q_proj.bias, rm_q, sm_q)
# linear_function(hidden_states, in_proj_weight_q, self.q_proj.bias)
# print(key_value_states.size(), rm_kv.size(), sm_kv.size())
kv = factorize_linear(key_value_states, in_proj_weight_kv, self.proj_bias_kv, rm_kv, sm_kv) #
# linear_function(key_value_states, in_proj_weight_kv, self.proj_bias_kv)
kv = kv.view(total_bsz_kv, self.num_heads, 2, self.head_dim).transpose(1, 2).contiguous()
q = q.view(total_bsz_q, self.num_heads, self.head_dim)
dropout_p = self.dropout if self.training else 0.0
causal = False
                softmax_scale = 1.0 / math.sqrt(self.head_dim)
context = self.fast_bert_mha(q, kv, cu_seqlens, cu_seqlens_kv,
max_len, max_len_kv, dropout_p, softmax_scale, causal, False)
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
# output = linear_function(context, out_proj_weight, self.out_proj.bias)
output = factorize_linear(context, out_proj_weight, self.out_proj.bias, rm_o, sm_o)
coverage = None
return output, coverage, incremental_cache
if self.is_factorized:
if self.multiplicative_factorize:
rm_q = torch.index_select(self.rm_q, 0, lang).squeeze(0) # squeeze possible because only 1
sm_q = torch.index_select(self.sm_q, 0, lang).squeeze(0)
rm_kv = torch.index_select(self.rm_kv, 0, lang).squeeze(0) # squeeze possible because only 1
sm_kv = torch.index_select(self.sm_kv, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.dyrank:
mul_factor_q = torch.mm(rm_q.t(), sm_q)
mul_factor_kv = torch.mm(rm_kv.t(), sm_kv)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_q = torch.bmm(rm_q.unsqueeze(-1), sm_q.unsqueeze(1)).sum(dim=0)
mul_factor_kv = torch.bmm(rm_kv.unsqueeze(-1), sm_kv.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_proj_weight_q = in_proj_weight_q * mul_factor_q
in_proj_weight_kv = in_proj_weight_kv * mul_factor_kv
out_proj_weight = out_proj_weight * mul_factor_out
r_q = torch.index_select(self.r_q, 0, lang).squeeze(0)
s_q = torch.index_select(self.s_q, 0, lang).squeeze(0)
r_kv = torch.index_select(self.r_kv, 0, lang).squeeze(0)
s_kv = torch.index_select(self.s_kv, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.dyrank:
add_factor_q = torch.mm(r_q.t(), s_q)
add_factor_kv = torch.mm(r_kv.t(), s_kv)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_q = torch.bmm(r_q.unsqueeze(-1), s_q.unsqueeze(1)).sum(dim=0)
add_factor_kv = torch.bmm(r_kv.unsqueeze(-1), s_kv.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
# Has to be additive here
in_proj_weight_q = in_proj_weight_q + add_factor_q
in_proj_weight_kv = in_proj_weight_kv + add_factor_kv
out_proj_weight = out_proj_weight + add_factor_out
if hidden_states.ndim == 3 and key_value_states.ndim == 3:
recompute = checkpointing
# TODO: Add factorize
# attention_mask should have size Bxlen_k
low_precision = True
attn_output, coverage = encdec_attn_bias_func(recompute, self.training, self.num_heads,
hidden_states, key_value_states,
in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
self.q_proj.bias, self.proj_bias_kv, self.out_proj.bias,
attention_mask, self.dropout,
incremental, incremental_cache,
False, None, None, # no rotary encodings
low_precision, True)
elif hidden_states.ndim == 2 and key_value_states.ndim == 2:
assert self.fast_bert_mha is not None
assert cu_seqlens is not None
assert cu_seqlens_kv is not None
assert max_len is not None
assert max_len_kv is not None
assert incremental == False
assert incremental_cache is None
total_bsz_q = hidden_states.size(0)
total_bsz_kv = key_value_states.size(0)
q = linear_function(hidden_states, in_proj_weight_q, self.q_proj.bias)
kv = linear_function(key_value_states, in_proj_weight_kv, self.proj_bias_kv)
kv = kv.view(total_bsz_kv, self.num_heads, 2, self.head_dim).transpose(1, 2).contiguous()
q = q.view(total_bsz_q, self.num_heads, self.head_dim)
dropout_p = self.dropout if self.training else 0.0
causal = False
                softmax_scale = 1.0 / math.sqrt(self.head_dim)
context = self.fast_bert_mha(q, kv, cu_seqlens, cu_seqlens_kv,
max_len, max_len_kv, dropout_p, softmax_scale, causal, False)
context = context.view(-1, self.num_heads * self.head_dim).contiguous()
attn_output = linear_function(context, out_proj_weight, self.out_proj.bias)
coverage = None
return attn_output, coverage, incremental_cache
class MBartCrossAttentionSlow(MBartAttention):
def convert_fast_attention(self):
pass
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
lang=None, atb=None,
incremental=False, incremental_cache=None, **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
# is_cross_attention = key_value_states is not None
assert key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
# these are stored
key_states = incremental_cache['c_k']
value_states = incremental_cache['c_v']
else:
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
if incremental:
incremental_cache['c_k'] = key_states
incremental_cache['c_v'] = value_states
# reshape into B x H x T x D ?
key_states = self._shape(key_states, -1, bsz)
value_states = self._shape(value_states, -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, incremental_cache
class MBartAutoRegressiveSelfAttentionSLow(MBartAttention):
def convert_fast_attention(self):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
incremental=False, incremental_cache=None, **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
# is_cross_attention = key_value_states is not None
assert key_value_states is None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if incremental:
if 'k' in incremental_cache and 'v' in incremental_cache:
                key_states = torch.cat([incremental_cache['k'], key_states], dim=1)  # concat along the time dim
                value_states = torch.cat([incremental_cache['v'], value_states], dim=1)  # concat along the time dim
incremental_cache['k'] = key_states
incremental_cache['v'] = value_states
else:
incremental_cache['k'] = key_states
incremental_cache['v'] = value_states
# reshape into B x H x T x D ?
key_states = self._shape(key_states, -1, bsz)
value_states = self._shape(value_states, -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, incremental_cache
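
# Incremental decoding sketch (added for illustration; `attn` and the step
# tensors are local to this example): on the first step the cache is empty and
# k/v are stored; on later steps the new k/v are appended along the time dim,
# so attention always sees the full decoded prefix.
#
#     attn = MBartAutoRegressiveSelfAttentionSLow(embed_dim=1024, num_heads=16,
#                                                 is_decoder=True)
#     cache = {}
#     out1, _, cache = attn(step1_states, incremental=True, incremental_cache=cache)
#     out2, _, cache = attn(step2_states, incremental=True, incremental_cache=cache)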
class MBartEncoderLayer(nn.Module):
def __init__(self, config: MBartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MBartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.activation_fn_name = config.activation_function
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
max_len=-1, cu_seqlens=None,
checkpointing_ffn=False
):
"""
:param checkpointing_ffn:
:param output_attentions: Whether or not to return the attentions tensors of all attention layers.
:param attention_mask: `(batch, src_len)`
:param hidden_states: `(seq_len, batch, embed_dim)`
:param cu_seqlens:
:param max_len:
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
cu_seqlens=cu_seqlens,
max_len=max_len
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.fused and hidden_states.is_cuda:
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
dropout = self.activation_dropout if self.training else 0.0
hidden_states = self.fused_function(dropout, checkpointing_ffn, hidden_states, *weights, *biases)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MBartDecoderLayer(nn.Module):
def __init__(self, config: MBartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MBartAttention( # MBartAutoRegressiveSelfAttentionSLow(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = MBartCrossAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout
)
self.activation_fn_name = config.activation_function
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.is_factorized = False
self.multiplicative_factorize = False
self.fast_factorize = False
self.ffn_dim = config.decoder_ffn_dim
self.n_languages = -1
self.has_adapter = False
self.adapter_location = -1
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
@property
def word_lut(self):
return self.embed_tokens
def freeze_self_attn_params(self):
self.self_attn.q_proj.weight.requires_grad = False
self.self_attn.k_proj.weight.requires_grad = False
self.self_attn.v_proj.weight.requires_grad = False
self.self_attn.out_proj.weight.requires_grad = False
self.self_attn.q_proj.bias.requires_grad = False
self.self_attn.k_proj.bias.requires_grad = False
self.self_attn.v_proj.bias.requires_grad = False
self.self_attn.out_proj.bias.requires_grad = False
def freeze_ffn_params(self):
self.fc1.weight.requires_grad = False
self.fc2.weight.requires_grad = False
self.fc1.bias.requires_grad = False
self.fc2.bias.requires_grad = False
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
# add factorized weights for self-attention
self.self_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative,
fast=fast, dyrank=dyrank)
self.encoder_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative,
fast=fast, dyrank=dyrank)
# add factorized_weights for ffn
self.is_factorized = True
self.multiplicative_factorize = multiplicative
self.fast_factorize = fast
self.dyrank = dyrank
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
if self.dyrank:
nn.init.zeros_(self.r_i)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.zeros_(self.r_o)
nn.init.normal_(self.s_o, 0.0, 0.02)
else:
nn.init.normal_(self.r_i, 0.0, 0.02)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
if multiplicative:
_rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, _rank, self.ffn_dim))
constant = 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
def add_adapters(self, n_languages, downsampling_factor=4, adapter_location=1):
"""
:param n_languages: one adapter per language
:param downsampling_factor: downsampling rate size for the hidden layer
:param adapter_location:
:return:
"""
self.n_languages = n_languages
self.has_adapter = True
self.adapter_location = adapter_location
from .adapter import MultilingualAdapter
self.adapter = MultilingualAdapter(n_languages, self.embed_dim, downsample_factor=downsampling_factor)
def get_mlp_weights(self, lang=None, atb=None):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
if lang is not None:
if self.is_factorized:
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.fast_factorize or self.dyrank:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
return in_weight, out_weight, in_bias, out_bias
def call_mlp(self, x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
fused, fused_function, checkpointing):
"""
Move the MLP section to a different function to choose between pytorch and custom mlp
:param x:
:param in_weight:
:param out_weight:
:param in_bias:
:param out_bias:
:param activation_fn:
:param dropout_p:
:param training_:
:param fused:
:param fused_function:
:return:
"""
# TODO: check type x torch.half or torch.float32
if fused and x.is_cuda:
dropout_p_ = dropout_p if training_ else 0.0
weights = [in_weight, out_weight]
biases = [in_bias, out_bias]
x = fused_function(dropout_p_, checkpointing, x, *weights, *biases)
else:
x = F.linear(x, in_weight, in_bias)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = F.linear(x, out_weight, out_bias)
return x
def call_factorize_mlp(self, x, lang, activation_fn, dropout_p, training_):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
n_languages, _rank = self.rm_o.size(0), self.rm_o.size(1)
# TODO: mm instead of index select for multiple code
if lang.ndim == 1:
            rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0)  # squeeze(0): lang holds a single index
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
elif lang.ndim == 2: # for flash attention
rm_i = torch.mm(lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(lang.size(0), _rank,
self.rm_i.size(-1))
sm_i = torch.mm(lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(lang.size(0), _rank,
self.sm_i.size(-1))
rm_o = torch.mm(lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(lang.size(0), _rank,
self.rm_o.size(-1))
sm_o = torch.mm(lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(lang.size(0), _rank,
self.sm_o.size(-1))
elif lang.ndim == 3:
_len, _bsz = lang.size(0), lang.size(1)
_lang = lang.view(_len * _bsz, lang.size(-1))
rm_i = torch.mm(_lang, self.rm_i.view(n_languages, _rank * self.rm_i.size(-1))).view(
_len, _bsz, _rank, self.rm_i.size(-1))
sm_i = torch.mm(_lang, self.sm_i.view(n_languages, _rank * self.sm_i.size(-1))).view(
_len, _bsz, _rank, self.sm_i.size(-1))
rm_o = torch.mm(_lang, self.rm_o.view(n_languages, _rank * self.rm_o.size(-1))).view(
_len, _bsz, _rank, self.rm_o.size(-1))
sm_o = torch.mm(_lang, self.sm_o.view(n_languages, _rank * self.sm_o.size(-1))).view(
_len, _bsz, _rank, self.sm_o.size(-1))
x = factorize_linear(x, in_weight, in_bias, rm_i, sm_i)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = factorize_linear(x, out_weight, out_bias, rm_o, sm_o)
return x
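    # Equivalence sketch (illustrative, not executed): for a hard one-hot `lang`,
    # the mm-based gather above reduces to index_select over the language axis:
    #   idx = torch.tensor([2, 0])                                      # [B]
    #   onehot = torch.nn.functional.one_hot(idx, n_languages).float()  # [B x n_lang]
    #   via_mm = torch.mm(onehot, self.rm_i.view(n_languages, -1)).view(2, _rank, -1)
    #   via_sel = torch.index_select(self.rm_i, 0, idx)
    # via_mm equals via_sel; a soft distribution instead of a one-hot yields a
    # per-sample mixture of language factors.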
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
sub_encoder_hidden_states: Optional[torch.Tensor] = None,
sub_encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
incremental: Optional[bool] = False,
incremental_cache=None,
checkpointing_ffn=False,
checkpointing_cross_attn=False,
checkpointing_self_attn=False,
lang=None, src_lang=None,
max_len=None, cu_seqlens=None,
max_len_kv=None, cu_seqlens_kv=None, **kwargs
):
"""
        :param hidden_states:
        :param attention_mask:
        :param encoder_hidden_states:
        :param encoder_attention_mask:
        :param sub_encoder_hidden_states:
        :param sub_encoder_attention_mask:
        :param output_attentions:
        :param incremental:
        :param incremental_cache:
        :param checkpointing_ffn: Recompute the middle layer of the FFN to save memory
        :param checkpointing_cross_attn: Recompute cross-attention to save memory
        :param checkpointing_self_attn: Recompute self-attention to save memory
        :param lang:
        :param src_lang:
        :param max_len: maximum sequence length (flash-attention path)
        :param cu_seqlens: cumulative sequence lengths (flash-attention path)
        :param max_len_kv: maximum key/value length (flash-attention path)
        :param cu_seqlens_kv: cumulative key/value lengths (flash-attention path)
        :param kwargs:
        :return:
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
# self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
lang=lang, checkpointing=checkpointing_self_attn,
cu_seqlens=cu_seqlens, max_len=max_len
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
# Cross-Attention Block
        cross_attn_weights = None
        contrastive_loss = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
attention_input = hidden_states
hidden_states, cross_attn_weights, incremental_cache = self.encoder_attn(
hidden_states=attention_input,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
checkpointing=checkpointing_cross_attn,
lang=lang, src_lang=src_lang,
cu_seqlens=cu_seqlens, max_len=max_len,
cu_seqlens_kv=cu_seqlens_kv, max_len_kv=max_len_kv
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.fast_factorize:
hidden_states = self.call_factorize_mlp(hidden_states, lang, self.activation_fn, self.activation_dropout,
self.training)
else:
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang)
hidden_states = self.call_mlp(hidden_states, in_weight, out_weight, in_bias, out_bias,
self.activation_fn, self.activation_dropout, self.training,
self.fused, self.fused_function, checkpointing_ffn)
# hidden_states = fused_dropout_add(hidden_states, residual, self.dropout, self.training)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
if self.has_adapter:
residual = hidden_states
if self.adapter_location == 1:
assert lang is not None
hidden_states = self.adapter(hidden_states, lang=lang)
hidden_states = hidden_states + residual
#
# if hidden_states.dtype == torch.float16 and (
# torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
# ):
# clamp_value = torch.finfo(hidden_states.dtype).max - 1000
# hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if contrastive_loss is not None:
outputs += (contrastive_loss,)
return outputs, incremental_cache
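    # Usage sketch (illustrative; the layer and tensors are built elsewhere):
    #   outputs, cache = layer(hidden_states, attention_mask=self_mask,
    #                          encoder_hidden_states=context,
    #                          encoder_attention_mask=context_mask,
    #                          incremental=True, incremental_cache=None)
    #   hidden_states = outputs[0]   # outputs[1:] hold attention weights if requested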
class MBartPreTrainedModel(PreTrainedModel):
config_class = MBartConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (MBartEncoder, MBartDecoder)):
module.gradient_checkpointing = value
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
MBART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.MBartConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
MBART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration, MBartConfig
>>> model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
>>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
>>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
Mask filling example::
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration
>>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
>>> # de_DE is the language symbol id <LID> for German
>>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
>>> model = MBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
"""
MBART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
MBart uses a specific language id token as the starting token for :obj:`decoder_input_ids` generation that
varies according to source and target language, *e.g.* 25004 for `en_XX`, and 25003 for `de_DE`. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no
:obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
the right for denoising pre-training following the paper.
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class MBartEncoder(MBartPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`MBartEncoderLayer`.
Args:
config: MBartConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: MBartConfig, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.layerdrop = opt.death_rate
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.opt = opt
self.word_dropout = opt.word_dropout
embed_dim = config.d_model
self.embed_dim = embed_dim
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = LayerNorm(embed_dim)
self.layer_norm = LayerNorm(config.d_model)
self.init_weights()
self.gradient_checkpointing = False
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
checkpointing_ffn=False
):
"""
:param input_ids: [T x B] discrete input tokens
:param attention_mask: [B x T] attention mask (padded = 1, non-pad = 0]
:param inputs_embeds: [T x B x H] optional
:param output_attentions:
:param output_hidden_states:
:param return_dict:
:return:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
bsz, seq_len = input_ids.size(0), input_ids.size(1)
input_shape = torch.Size([bsz, seq_len])
elif inputs_embeds is None:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
inputs_embeds = inputs_embeds.view(bsz, seq_len, -1)
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
else:
# use the input embeds from another stack
# maybe don't use layernorm_embedding
hidden_states = inputs_embeds
hidden_states = self.layernorm_embedding(hidden_states)
# should we use layernorm embedding here?
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# TODO: use fast bert mha
can_run_fast_bert_mha = False
# check if fast bert mha can be run
seq_len = hidden_states.size(1)
bsz = hidden_states.size(0)
sm = torch.cuda.get_device_capability()
total_bsz = 0
if self.fast_bert_mha and torch.is_autocast_enabled():
can_run_fast_bert_mha = True
x = hidden_states
padding_mask = attention_mask # [B x T]
# masked positions = 1 so to compute length we need the (1 -)
if padding_mask is None:
padding_mask = x.new_zeros(bsz, seq_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
x = x.view(-1, x.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
hidden_states = x.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
else:
max_len = -1
cu_seqlens = None
non_pad_indices = None
if not can_run_fast_bert_mha:
# transpose from [B x T x H] to [T x B x H]
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
max_len=max_len, cu_seqlens=cu_seqlens,
checkpointing_ffn=checkpointing_ffn
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
# if we remove padding before (for fast bert MHA) then remember to put padding back
# to restore the form B x T X H
if can_run_fast_bert_mha:
# remove the patch
# if x.size(0) > total_bsz:
# x = x[:total_bsz, :]
hidden_states = index_copy(hidden_states, non_pad_indices, bsz * seq_len)
hidden_states = hidden_states.view(bsz, seq_len, -1)
hidden_states = hidden_states.transpose(0, 1).contiguous()
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
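    # Unpadding sketch (illustrative): with lengths [3, 2] and seq_len 4,
    #   padding_mask = [[0, 0, 0, 1],
    #                   [0, 0, 1, 1]]        # 1 marks a padded position
    # non_pad_indices selects the 5 real rows of the flattened [B*T x H] states,
    # cu_seqlens = tensor([0, 3, 5], dtype=torch.int32) gives the boundaries the
    # fused attention kernel expects, and index_copy scatters the rows back into
    # the padded layout after the layer stack.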
class MBartDecoder(MBartPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`MBartDecoderLayer`
Args:
config: MBartConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: MBartConfig, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.predict_language = opt.predict_language
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
self.dropout = config.dropout
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = LayerNorm(config.d_model)
self.layer_norm = LayerNorm(config.d_model)
self.init_weights()
self.gradient_checkpointing = False
self.model_size = config.d_model
self.switchout = 0.0
# self.word_lut = self.embed_tokens
self.config.bert_hidden_size = config.d_model
self.layerdrop = opt.death_rate_decoder
self.dec_pretrained_model = 'mbart'
if opt.freeze_embedding:
self.embed_tokens.weight.requires_grad = False
self.word_dropout = opt.word_dropout
# freeze parameters if declared
if opt.freeze_decoder_self_attn:
print("[INFO] Freezing decoder self-attn paramaters")
self.freeze_self_attn_params()
if opt.freeze_decoder_ffn:
self.freeze_ffn_params()
if opt.freeze_decoder:
print("[INFO] Freezing decoder parameters ...")
for p in self.parameters():
p.requires_grad = False
if opt.multilingual_factorized_weights_decoder:
# TODO: dyrank
print("[INFO] Factorizing MBART model into %d languages and %d factors"
% (opt.n_languages, opt.n_attributes))
self.add_factorize(opt.n_languages, rank=opt.mfw_rank,
multiplicative=opt.mfw_multiplicative,
fast=opt.fast_factorize)
# adapter
if opt.decoder_adapter > 0:
print("[INFO] Adding MBART Adapters for %d languages" % opt.n_languages)
for layer in self.layers:
layer.add_adapters(opt.n_languages, adapter_location=opt.decoder_adapter)
# flash attention
from onmt.modules.optimized.flash_mha import flash_bert_mha
self.fast_bert_mha = flash_bert_mha
# language prediction
if self.predict_language:
self.linear_cls = torch.nn.Linear(self.model_size, opt.n_languages)
self.cross_attention_cls = MBartCrossAttention(self.model_size, self.model_size // 64,
dropout=0.0, is_decoder=True, bias=True)
self.layer_norm_cls = LayerNorm(self.model_size)
else:
self.linear_cls = None
self.cross_attention_cls = None
self.layer_norm_cls = None
def freeze_self_attn_params(self):
#
# self.layer_norm.weight.requires_grad = False
# self.layer_norm.bias.requires_grad = False
for layer in self.layers:
layer.freeze_self_attn_params()
def freeze_ffn_params(self):
for layer in self.layers:
layer.freeze_ffn_params()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False, dyrank=False, **kwargs):
idx = 0
for layer in self.layers:
idx += 1
# the first layer cannot be factorized because it has to be used to predict the language
if self.predict_language and idx == 1:
continue
layer.add_factorize(n_languages, rank=rank, multiplicative=multiplicative,
fast=fast, dyrank=dyrank)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
sub_encoder_hidden_states=None,
sub_encoder_attention_mask=None,
inputs_embeds=None,
incremental=False, incremental_cache=None,
lang=None, src_lang=None,
output_attentions=None,
output_hidden_states=None,
**kwargs
):
"""
:param checkpointing_cross_attn:
:param input_ids: [batch_size x seq_len]
:param attention_mask:
:param encoder_hidden_states:
:param encoder_attention_mask:
:param sub_encoder_hidden_states:
:param sub_encoder_attention_mask:
:param inputs_embeds:
:param incremental:
:param incremental_cache:
:param lang:
:param atb:
:param output_attentions:
:param output_hidden_states:
:return:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = 0
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
        bsz, qlen = input_shape[0], input_shape[1]
        klen = qlen
        padding_mask = attention_mask
        attention_mask = torch.triu(
            inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
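        # Mask sketch (illustrative): triu(..., diagonal=1) marks strictly-future
        # positions True, i.e. masked. For qlen == klen == 3:
        #   [[False,  True,  True],
        #    [False, False,  True],
        #    [False, False, False]]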
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
# hidden_states = hidden_states
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
# next_decoder_cache = () if use_cache else None
contrastive_loss = 0
# self.fast_bert_mha = None
if self.fast_bert_mha is not None and hidden_states.dtype == torch.half:
can_run_fast_bert_mha = True
# lets unpad both hidden_states and context states
if padding_mask is None:
padding_mask = input_ids.new_zeros(bsz, qlen)
padding_mask = padding_mask.contiguous().long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
hidden_states = hidden_states.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=hidden_states.device)
non_pad_indices_q = non_pad_indices
# unpad the context
encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
padding_mask = encoder_attention_mask
if padding_mask is None:
context_len = encoder_hidden_states.size(1)
padding_mask = input_ids.new_zeros(bsz, context_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices)
max_len_kv = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
if src_lang is not None and src_lang.ndim == 3:
src_lang = src_lang.view(-1, src_lang.size(-1))
src_lang = src_lang.index_select(0, non_pad_indices)
else:
max_len, cu_seqlens = None, None
max_len_kv, cu_seqlens_kv = None, None
non_pad_indices_q = None
can_run_fast_bert_mha = False
hidden_states = hidden_states.transpose(0, 1).contiguous()
if src_lang is not None and src_lang.ndim == 3:
src_lang = src_lang.transpose(0, 1)
        _lang = lang
        pred_lang = None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# Stochastic Layer (only applicable when not predicting language or idx > 0)
if not (self.predict_language and idx == 0):
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
# TODO: use pred_lang instead of lang if we use predict_language
if self.predict_language and idx == 0:
__lang = None
_src_lang = None
else:
__lang = _lang
_src_lang = src_lang
layer_outputs, _ = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
sub_encoder_hidden_states=sub_encoder_hidden_states,
sub_encoder_attention_mask=sub_encoder_attention_mask,
output_attentions=output_attentions,
lang=__lang,
src_lang=_src_lang,
max_len=max_len, cu_seqlens=cu_seqlens,
max_len_kv=max_len_kv, cu_seqlens_kv=cu_seqlens_kv
)
hidden_states = layer_outputs[0]
if self.predict_language and idx == 0:
cross_attn_input = self.layer_norm_cls(hidden_states)
cross_attn_output, _, _ = self.cross_attention_cls(
hidden_states=cross_attn_input,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
output_attentions=None,
incremental=False, incremental_cache=None,
cu_seqlens=cu_seqlens, max_len=max_len,
cu_seqlens_kv=cu_seqlens_kv, max_len_kv=max_len_kv
)
                # maybe we need a gated function here to combine them
cls_input = cross_attn_output + hidden_states
pred_lang = self.linear_cls(cls_input)
_lang = torch.nn.functional.softmax(pred_lang, dim=-1, dtype=torch.float32)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add up the contrastive_loss per layer
if sub_encoder_hidden_states is not None:
contrastive_loss_ = layer_outputs[-1]
# print("Receive contrastive loss after layer", contrastive_loss_.size())
contrastive_loss = contrastive_loss + contrastive_loss_
hidden_states = self.layer_norm(hidden_states)
# re-padding if we use flash attention
if can_run_fast_bert_mha:
seq_len = qlen
hidden_states = index_copy(hidden_states, non_pad_indices_q, bsz * seq_len)
hidden_states = hidden_states.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
if pred_lang is not None:
pred_lang = index_copy(pred_lang, non_pad_indices_q, bsz * seq_len)
pred_lang = pred_lang.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not self.predict_language:
pred_lang = None
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, contrastive_loss, pred_lang]
if v is not None
)
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
# atb = decoder_state.tgt_atb
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
input_ids = input
input_shape = input_ids.size()
time_step = input.size(1)
input_ = input
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1 and len(buffers) > 0:
                # if the buffers already hold past key/values and more than one token
                # is given, this is a prefix-decoding step: only the last token is new
input_ = input[:, -1:]
past_key_values_length = input.size(1) - 1
else:
past_key_values_length = 0
else:
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_) * self.embed_scale
qlen = input_ids.size(1)
klen = qlen
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
if input.size(1) > 1 and len(buffers) > 0:
attention_mask = attention_mask[-1:, :]
encoder_attention_mask = decoder_state.src_mask
if not self.layers[0].encoder_attn.fast_attention:
raise NotImplementedError
else:
encoder_attention_mask = encoder_attention_mask.bool()
# embed positions
positions = self.embed_positions(input_.size(), past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = hidden_states.transpose(0, 1)
hidden_states = self.layernorm_embedding(hidden_states)
max_len = None
cu_seqlens = None
for idx, decoder_layer in enumerate(self.layers):
if buffering:
buffer = buffers[idx] if idx in buffers else None
else:
buffer = None
# TODO: handle self.predict_language
layer_outputs, buffer = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=None,
incremental=buffering, incremental_cache=buffer,
lang=lang,
max_len=max_len, cu_seqlens=cu_seqlens
)
if buffering:
decoder_state.update_attention_buffer(buffer, idx)
hidden_states = layer_outputs[0]
hidden_states = self.layer_norm(hidden_states)
output = hidden_states[-1].unsqueeze(0)
# just a fake coverage, at the moment coverage is not returned during step
coverage = hidden_states.new(hidden_states.size(1), 1, encoder_hidden_states.size(0)).zero_()
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states
return output_dict
# File: NMTGMinor-master/pretrain_module/tokenization_deltalm.py
import torch
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
class DeltaLMTokenizer(PreTrainedTokenizer):
"""
    Construct a DeltaLM tokenizer, adapted from the MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import MBart50Tokenizer
>>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... labels = tokenizer(tgt_text, return_tensors="pt").input_ids
>>> # model(**model_inputs, labels=labels) should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
src_lang=None,
tgt_lang=None,
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs
) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
# kwargs["additional_special_tokens"] += [
# code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
# ]
super().__init__(
src_lang=src_lang,
tgt_lang=tgt_lang,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
self.sp_model_size = len(self.sp_model)
# self.lang_code_to_id = {
# code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
# }
# self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
# self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
#
# self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
# self._src_lang = "</s>" # src_lang if src_lang is not None else
# self.cur_lang_code_id = 2 # self.lang_code_to_id[self._src_lang]
# self.tgt_lang = tgt_lang
self._src_lang = src_lang
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def vocab_size(self) -> int:
return len(self.sp_model) + self.fairseq_offset
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
assert new_src_lang in ["src", "tgt"], "DeltaLM tokenizer at the moment only supports src and tgt."
self._src_lang = new_src_lang
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def get_vocab(self) -> Dict:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
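        # Offset sketch (illustrative): if the SP model maps "," -> 3, the returned id
        # is 3 + self.fairseq_offset == 4, matching the alignment table above; pieces
        # unknown to the SP model (spm id 0) fall back to self.unk_token_id.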
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
return self.sp_model.decode(tokens)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
print(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
sequence-to-sequence models that need a slightly different processing for the labels.
"""
self.set_tgt_lang_special_tokens(self.tgt_lang)
yield
self.set_src_lang_special_tokens(self.src_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
if self.src_lang in ["<s>", "src"]:
self.prefix_tokens = []
elif self.src_lang in ["</s>", "tgt"]:
self.prefix_tokens = [self.eos_token_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
"""Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
# self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
self.prefix_tokens = [self.eos_token_id]
self.suffix_tokens = [self.eos_token_id]
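    # Sketch (illustrative): with src_lang == "src" the prefix is empty, so
    # build_inputs_with_special_tokens([5, 6]) -> [5, 6, eos_id]; after
    # set_tgt_lang_special_tokens (or with src_lang == "tgt") it becomes
    # [eos_id, 5, 6, eos_id].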
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `labels`: (for decoder) `[tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def prepare_seq2seq_batch(
self,
src_texts: List[str],
src_lang: str = "en_XX",
tgt_texts: Optional[List[str]] = None,
tgt_lang: str = "ro_RO",
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
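    # Usage sketch (illustrative; the vocab path is a placeholder):
    #   tok = DeltaLMTokenizer("sentencepiece.bpe.model", src_lang="src", tgt_lang="tgt")
    #   src_ids = tok("hello world")["input_ids"]        # [..., eos_id]
    #   with tok.as_target_tokenizer():
    #       tgt_ids = tok("hallo welt")["input_ids"]     # [eos_id, ..., eos_id]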
class MultilingualDeltaLMTokenizer(DeltaLMTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
src_lang=None,
tgt_lang=None,
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs
) -> None:
self.added_tokens_decoder = None
self.added_tokens_encoder = None
self.additional_special_tokens = None
        super(MultilingualDeltaLMTokenizer, self).__init__(
            vocab_file, src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=sp_model_kwargs)
@property
def vocab_size(self) -> int:
return len(self.sp_model) + self.fairseq_offset
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
def override_lang_list(self, lang_list):
_lang_list = []
for lang in lang_list:
if lang in self.fairseq_tokens_to_ids:
continue
else:
_lang_list.append(lang)
lang_list = _lang_list
self.additional_special_tokens = lang_list
start = 250001
self.added_tokens_encoder.clear()
for i, lang in enumerate(lang_list):
self.added_tokens_encoder[lang] = start + i
self.added_tokens_decoder.clear()
for word in self.added_tokens_encoder:
self.added_tokens_decoder[self.added_tokens_encoder[word]] = word
# print(self.added_tokens_encoder)
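        # Sketch (illustrative): override_lang_list(["en_XX", "de_DE"]) registers
        #   en_XX -> 250001, de_DE -> 250002
        # as added tokens appended after the pretrained vocabulary, with
        # added_tokens_decoder holding the inverse mapping.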
def tokenize(self, text, **kwargs):
if text in self.added_tokens_encoder:
return [text]
repeated_ = True
tokens = text.strip().split()
for token in tokens:
if token not in self.added_tokens_encoder:
repeated_ = False
if repeated_:
return tokens
return super(MultilingualDeltaLMTokenizer, self).tokenize(text, **kwargs)
def _tokenize(self, text: str) -> List[str]:
if self.src_lang in ["</s>", "src", "tgt"] :
return self.sp_model.encode(text, out_type=str)
else:
return [self.src_lang] + self.sp_model.encode(text, out_type=str)
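        # Sketch (illustrative): with src_lang == "en_XX" the language tag is
        # prepended, e.g. _tokenize("hello") -> ["en_XX", "▁hello"], while
        # src_lang in {"src", "tgt", "</s>"} leaves the piece sequence unchanged.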
    @classmethod
    def from_pretrained(cls, *args, lang_list=None, **kwargs):
        # avoid a mutable default argument; an empty list means "no extra languages"
        tokenizer = super(MultilingualDeltaLMTokenizer, cls).from_pretrained(*args, **kwargs)
        tokenizer.override_lang_list(lang_list if lang_list is not None else [])
        return tokenizer
# File: NMTGMinor-master/pretrain_module/configuration_mbart.py
# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MBART model configuration """
import warnings
from collections import OrderedDict
from typing import Mapping
from .configuration_utils import PretrainedConfig
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/config.json",
# See all MBART models at https://huggingface.co/models?filter=mbart
}
class MBartConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.MBartModel`. It is used to
instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MBART `facebook/mbart-large-cc25
<https://huggingface.co/facebook/mbart-large-cc25>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 50265):
Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.MBartModel` or
:class:`~transformers.TFMBartModel`.
d_model (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of encoder layers.
decoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of decoder layers.
encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
dropout (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the encoder. See the `LayerDrop paper <see
https://arxiv.org/abs/1909.11556>`__ for more details.
decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the decoder. See the `LayerDrop paper <see
https://arxiv.org/abs/1909.11556>`__ for more details.
scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (:obj:`int`, `optional`, defaults to 2):
The id of the token to force as the last generated token when :obj:`max_length` is reached. Usually set to
:obj:`eos_token_id`.
Example::
>>> from transformers import MBartModel, MBartConfig
>>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
>>> configuration = MBartConfig()
>>> # Initializing a model from the facebook/mbart-large-cc25 style configuration
>>> model = MBartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "mbart"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
forced_eos_token_id=2,
**kwargs
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
# File: NMTGMinor-master/pretrain_module/tokenization_mbart50eu.py
import torch
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
# SPIECE_UNDERLINE = "▁"
#
# VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
#
# PRETRAINED_VOCAB_FILES_MAP = {
# "vocab_file": {
# "facebook/mbart-large-50-one-to-many-mmt": "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model",
# }
# }
#
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
# "facebook/mbart-large-50-one-to-many-mmt": 1024,
# }
#
# class MBART50TokenizerEU(PreTrainedTokenizer):
# """
# Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
# This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
# Users should refer to this superclass for more information regarding those methods.
# Args:
# vocab_file (`str`):
# Path to the vocabulary file.
# src_lang (`str`, *optional*):
# A string representing the source language.
# tgt_lang (`str`, *optional*):
# A string representing the target language.
# eos_token (`str`, *optional*, defaults to `"</s>"`):
# The end of sequence token.
# sep_token (`str`, *optional*, defaults to `"</s>"`):
# The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
# sequence classification or for a text and a question for question answering. It is also used as the last
# token of a sequence built with special tokens.
# cls_token (`str`, *optional*, defaults to `"<s>"`):
# The classifier token which is used when doing sequence classification (classification of the whole sequence
# instead of per-token classification). It is the first token of the sequence when built with special tokens.
# unk_token (`str`, *optional*, defaults to `"<unk>"`):
# The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
# token instead.
# pad_token (`str`, *optional*, defaults to `"<pad>"`):
# The token used for padding, for example when batching sequences of different lengths.
# mask_token (`str`, *optional*, defaults to `"<mask>"`):
# The token used for masking values. This is the token used when training this model with masked language
# modeling. This is the token which the model will try to predict.
# sp_model_kwargs (`dict`, *optional*):
# Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set:
# - `enable_sampling`: Enable subword regularization.
# - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
# - `nbest_size = {0,1}`: No sampling is performed.
# - `nbest_size > 1`: samples from the nbest_size results.
# - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
# using forward-filtering-and-backward-sampling algorithm.
# - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
# BPE-dropout.
# Examples:
# ```python
# >>> from transformers import MBart50Tokenizer
# >>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
# >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
# >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
# >>> model_inputs = tokenizer(src_text, return_tensors="pt")
# >>> with tokenizer.as_target_tokenizer():
# ... labels = tokenizer(tgt_text, return_tensors="pt").input_ids
# >>> # model(**model_inputs, labels=labels) should work
# ```"""
#
# vocab_files_names = VOCAB_FILES_NAMES
# max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
# pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
# model_input_names = ["input_ids", "attention_mask"]
#
# prefix_tokens: List[int] = []
# suffix_tokens: List[int] = []
#
# def __init__(
# self,
# vocab_file,
# src_lang=None,
# tgt_lang=None,
# eos_token="</s>",
# sep_token="</s>",
# cls_token="<s>",
# unk_token="<unk>",
# pad_token="<pad>",
# mask_token="<mask>",
# sp_model_kwargs: Optional[Dict[str, Any]] = None,
# **kwargs
# ) -> None:
# # Mask token behaves like a normal word, i.e. includes the space before it
# mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
#
# self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
#
# kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
# # kwargs["additional_special_tokens"] += [
# # code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
# # ]
#
# super().__init__(
# src_lang=src_lang,
# tgt_lang=tgt_lang,
# eos_token=eos_token,
# unk_token=unk_token,
# sep_token=sep_token,
# cls_token=cls_token,
# pad_token=pad_token,
# mask_token=mask_token,
# sp_model_kwargs=self.sp_model_kwargs,
# **kwargs,
# )
#
# self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
# self.sp_model.Load(str(vocab_file))
# self.vocab_file = vocab_file
#
# # Original fairseq vocab and spm vocab must be "aligned":
# # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
#
# # Mimic fairseq token-to-id alignment for the first 4 tokens
# self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
#
# # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
# self.fairseq_offset = 1
#
# self.sp_model_size = len(self.sp_model)
# # self.lang_code_to_id = {
# # code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
# # }
# # self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
# # self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
# #
# # self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
# self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
#
# # self._src_lang = "</s>" # src_lang if src_lang is not None else
# # self.cur_lang_code_id = 2 # self.lang_code_to_id[self._src_lang]
# # self.tgt_lang = tgt_lang
# self._src_lang = src_lang
# if "src_lang" == "eu":
# self._src_lang = "</s>"
# self.tgt_lang = tgt_lang
# self.set_src_lang_special_tokens(self._src_lang)
#
# @property
# def vocab_size(self) -> int:
# return len(self.sp_model) + self.fairseq_offset
#
# @property
# def src_lang(self) -> str:
# return self._src_lang
#
# @src_lang.setter
# def src_lang(self, new_src_lang: str) -> None:
# assert new_src_lang in ["src", "tgt"], "DeltaLM tokenizer at the moment only supports src and tgt."
# self._src_lang = new_src_lang
#
# def __getstate__(self) -> Dict:
# state = self.__dict__.copy()
# state["sp_model"] = None
# return state
#
# def __setstate__(self, d: Dict) -> None:
# self.__dict__ = d
#
# # for backward compatibility
# if not hasattr(self, "sp_model_kwargs"):
# self.sp_model_kwargs = {}
#
# self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
# self.sp_model.Load(self.vocab_file)
#
# def get_vocab(self) -> Dict:
# vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
# vocab.update(self.added_tokens_encoder)
# return vocab
#
# def _tokenize(self, text: str) -> List[str]:
# return self.sp_model.encode(text, out_type=str)
#
# def _convert_token_to_id(self, token: str) -> int:
# """Converts a token (str) in an id using the vocab."""
# if token in self.fairseq_tokens_to_ids:
# return self.fairseq_tokens_to_ids[token]
# spm_id = self.sp_model.PieceToId(token)
#
# # Need to return unknown token if the SP model returned 0
# return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
#
# def _convert_id_to_token(self, index: int) -> str:
# """Converts an index (integer) in a token (str) using the vocab."""
# if index in self.fairseq_ids_to_tokens:
# return self.fairseq_ids_to_tokens[index]
# return self.sp_model.IdToPiece(index - self.fairseq_offset)
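#
# # Round-trip sketch (hypothetical piece, assuming it exists in the spm
# # model): `_convert_token_to_id` adds `fairseq_offset` and
# # `_convert_id_to_token` subtracts it, so for any real piece
# # >>> tok._convert_id_to_token(tok._convert_token_to_id("▁de"))
# # '▁de'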
#
# def convert_tokens_to_string(self, tokens: List[str]) -> str:
# """Converts a sequence of tokens (strings for sub-words) in a single string."""
# return self.sp_model.decode(tokens)
#
# def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
# if not os.path.isdir(save_directory):
# print(f"Vocabulary path ({save_directory}) should be a directory")
# return
# out_vocab_file = os.path.join(
# save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
# )
#
# if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
# copyfile(self.vocab_file, out_vocab_file)
#
# return (out_vocab_file,)
#
# @contextmanager
# def as_target_tokenizer(self):
# """
# Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
# sequence-to-sequence models that need a slightly different processing for the labels.
# """
# self.set_tgt_lang_special_tokens(self.tgt_lang)
# yield
# self.set_src_lang_special_tokens(self.src_lang)
#
# def set_src_lang_special_tokens(self, src_lang: str) -> None:
# """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
# if self.src_lang in ["<s>", "src"]:
# self.prefix_tokens = []
# elif self.src_lang in ["</s>", "tgt"]:
# self.prefix_tokens = [self.eos_token_id]
# self.suffix_tokens = [self.eos_token_id]
#
# def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
# """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
# # self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
# self.prefix_tokens = [self.eos_token_id]
# self.suffix_tokens = [self.eos_token_id]
#
# def get_special_tokens_mask(
# self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
# already_has_special_tokens: bool = False
# ) -> List[int]:
# """
# Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
# special tokens using the tokenizer `prepare_for_model` method.
#
# Args:
# token_ids_0 (`List[int]`):
# List of IDs.
# token_ids_1 (`List[int]`, *optional*):
# Optional second list of IDs for sequence pairs.
# already_has_special_tokens (`bool`, *optional*, defaults to `False`):
# Whether or not the token list is already formatted with special tokens for the model.
#
# Returns:
# `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
# """
#
# if already_has_special_tokens:
# return super().get_special_tokens_mask(
# token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
# )
#
# prefix_ones = [1] * len(self.prefix_tokens)
# suffix_ones = [1] * len(self.suffix_tokens)
# if token_ids_1 is None:
# return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
# return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
#
# def build_inputs_with_special_tokens(
# self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
# ) -> List[int]:
# """
# Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
# adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
#
# - `input_ids` (for encoder) `[src_lang_code] X [eos]`
# - `labels`: (for decoder) `[tgt_lang_code] X [eos]`
#
# BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
# separator.
#
# Args:
# token_ids_0 (`List[int]`):
# List of IDs to which the special tokens will be added.
# token_ids_1 (`List[int]`, *optional*):
# Optional second list of IDs for sequence pairs.
#
# Returns:
# `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
# """
# if token_ids_1 is None:
# return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# # We don't expect to process pairs, but leave the pair logic for API consistency
# return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
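#
# # Layout sketch (hypothetical token ids): with `prefix_tokens == [2]` and
# # `suffix_tokens == [2]`, as set by `set_tgt_lang_special_tokens`,
# # >>> tok.build_inputs_with_special_tokens([100, 101])
# # [2, 100, 101, 2]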
#
# def _build_translation_inputs(
# self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
# ):
# """Used by translation pipeline, to prepare inputs for the generate function"""
# if src_lang is None or tgt_lang is None:
# raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
# self.src_lang = src_lang
# inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
# tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
# inputs["forced_bos_token_id"] = tgt_lang_id
# return inputs
#
# def prepare_seq2seq_batch(
# self,
# src_texts: List[str],
# src_lang: str = "en_XX",
# tgt_texts: Optional[List[str]] = None,
# tgt_lang: str = "ro_RO",
# **kwargs,
# ) -> BatchEncoding:
# self.src_lang = src_lang
# self.tgt_lang = tgt_lang
# return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
| 15,946
| 47.471125
| 223
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/configuration_whisper.py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Whisper model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from .configuration_utils import PretrainedConfig
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`WhisperModel`]. It is used to instantiate a
Whisper model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Whisper
[openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 51865):
Vocabulary size of the Whisper model. Defines the number of different tokens that can be represented by the
`decoder_input_ids` passed when calling [`WhisperModel`].
num_mel_bins (`int`, *optional*, defaults to 80):
Number of mel features used per input feature. Should correspond to the value used in the
`WhisperProcessor` class.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
for more details.
decoder_start_token_id (`int`, *optional*, defaults to 50257):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
d_model (`int`, *optional*, defaults to 256):
Dimensionality of the layers.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (`bool`, *optional*, defaults to False):
Scale embeddings by dividing by sqrt(d_model).
max_source_positions (`int`, *optional*, defaults to 1500):
The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
max_target_positions (`int`, *optional*, defaults to 448):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
pad_token_id (`int`, *optional*, defaults to 50256):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 50256):
Begin of stream token id.
eos_token_id (`int`, *optional*, defaults to 50257):
End of stream token id.
suppress_tokens (`List[int]`, *optional*):
A list containing the non-speech tokens that will be used by the logit processor in the `generate`
function. NON_SPEECH_TOKENS and NON_SPEECH_TOKENS_MULTI each correspond to the `english-only` and the
`multilingual` model.
begin_suppress_tokens (`List[int]`, *optional*, defaults to `[220,50256]`):
A list containing tokens that will be suppressed at the beginning of the sampling process. Initialized as
the token for `" "` (`blank_token_id`) and the `eos_token_id`.
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`WhisperForAudioClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification. Only relevant when using an
instance of [`WhisperForAudioClassification`].
apply_spec_augment (`bool`, *optional*, defaults to `False`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://arxiv.org/abs/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment == True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2):
The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
irrespectively of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks`.
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
mask_feature_min_masks (`int`, *optional*, defaults to 0):
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
step, irrespectively of `mask_feature_prob`. Only relevant if
`mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
Example:
```python
>>> from transformers import WhisperConfig, WhisperModel
>>> # Initializing a Whisper tiny style configuration
>>> configuration = WhisperConfig()
>>> # Initializing a model (with random weights) from the tiny style configuration
>>> model = WhisperModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "whisper"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=51865,
num_mel_bins=80,
encoder_layers=6,
encoder_attention_heads=4,
decoder_layers=6,
decoder_attention_heads=4,
decoder_ffn_dim=1536,
encoder_ffn_dim=1536,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
decoder_start_token_id=50257,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=256,
dropout=0.0,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
scale_embedding=False,
max_source_positions=1500,
max_target_positions=448,
pad_token_id=50256,
bos_token_id=50257,
eos_token_id=50256,
suppress_tokens=None,
begin_suppress_tokens=[220, 50256],
use_weighted_layer_sum=False,
classifier_proj_size=256,
apply_spec_augment=False,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
**kwargs,
):
self.vocab_size = vocab_size
self.num_mel_bins = num_mel_bins
self.d_model = d_model
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
self.use_weighted_layer_sum = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
suppress_tokens=suppress_tokens,
begin_suppress_tokens=begin_suppress_tokens,
**kwargs,
)
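# A minimal sketch of the SpecAugment mask-count arithmetic documented above
# (the 3000-frame input length is an illustrative assumption, not a config
# field): roughly mask_time_prob * len(time_axis) / mask_time_length spans are
# drawn, floored at mask_time_min_masks.
#
# >>> config = WhisperConfig()
# >>> time_steps = 3000
# >>> n = int(config.mask_time_prob * time_steps / config.mask_time_length)
# >>> max(n, config.mask_time_min_masks)
# 15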
| 14,344
| 54.173077
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/configuration_bart.py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BART model configuration """
import warnings
from collections import OrderedDict
from typing import Mapping
from .configuration_utils import PretrainedConfig
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BartModel`. It is used to
instantiate a BART model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the BART `facebook/bart-large
<https://huggingface.co/facebook/bart-large>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 50265):
Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.BartModel` or
:class:`~transformers.TFBartModel`.
d_model (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of encoder layers.
decoder_layers (:obj:`int`, `optional`, defaults to 12):
Number of decoder layers.
encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
dropout (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the encoder. See the `LayerDrop paper
<https://arxiv.org/abs/1909.11556>`__ for more details.
decoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
The LayerDrop probability for the decoder. See the `LayerDrop paper
<https://arxiv.org/abs/1909.11556>`__ for more details.
scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
num_labels (:obj:`int`, `optional`, defaults to 3):
The number of labels to use in :class:`~transformers.BartForSequenceClassification`.
forced_eos_token_id (:obj:`int`, `optional`, defaults to 2):
The id of the token to force as the last generated token when :obj:`max_length` is reached. Usually set to
:obj:`eos_token_id`.
Example::
>>> from transformers import BartModel, BartConfig
>>> # Initializing a BART facebook/bart-large style configuration
>>> configuration = BartConfig()
>>> # Initializing a model from the facebook/bart-large style configuration
>>> model = BartModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "bart"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
num_labels=3,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
is_encoder_decoder=True,
decoder_start_token_id=2,
forced_eos_token_id=2,
**kwargs
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=num_labels,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
# # ensure backward compatibility for BART CNN models
# if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
# self.forced_bos_token_id = self.bos_token_id
# warnings.warn(
# f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
# "The config can simply be saved and uploaded again to be fixed."
# )
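# Usage sketch (mirrors the docstring example above): BART starts decoding from
# the end-of-sequence token, so with the defaults
# >>> config = BartConfig()
# >>> config.decoder_start_token_id == config.eos_token_id == 2
# True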
| 8,693
| 48.118644
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/modeling_bart.py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BART model. """
import copy
import math
import random
import warnings
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.cuda.amp import autocast
from .activations import ACT2FN
# from ...file_utils import (
# add_code_sample_docstrings,
# add_end_docstrings,
# add_start_docstrings,
# add_start_docstrings_to_model_forward,
# replace_return_docstrings,
# )
from .modeling_outputs import (
BaseModelOutput,
# BaseModelOutputWithPastAndCrossAttentions,
# CausalLMOutputWithCrossAttentions,
# Seq2SeqLMOutput,
# Seq2SeqModelOutput,
# Seq2SeqQuestionAnsweringModelOutput,
# Seq2SeqSequenceClassifierOutput,
)
from .modeling_utils import PreTrainedModel
# from ...utils import logging
from .configuration_bart import BartConfig
import onmt
from collections import defaultdict
_CHECKPOINT_FOR_DOC = "facebook/bart-large"
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
BART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/bart-large",
# See all BART models at https://huggingface.co/models?filter=bart
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
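# Illustrative check (hypothetical token ids): the first position becomes the
# decoder start token and everything else shifts right by one.
# >>> ids = torch.tensor([[5, 6, 7]])
# >>> shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=2)
# tensor([[2, 5, 6]])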
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for uni-directional (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
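# Content sketch: for tgt_len=3 the mask is 0 on and below the diagonal and
# -inf above it, so each position attends only to itself and the past.
# >>> _make_causal_mask(torch.Size([1, 3]), torch.float32)[0, 0]
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])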
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
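# Sketch: a padding mask [[1, 1, 0]] expands to (bsz, 1, tgt_len, src_len)
# with 0.0 where attention is allowed and the dtype minimum where it is not.
# >>> m = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)
# >>> m.shape
# torch.Size([1, 1, 3, 3])
# >>> (m[0, 0, 0] == 0).tolist()
# [True, True, False]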
class BartLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset)
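# Sketch of the offset hack described above: position i reads row i + 2 of the
# table, so a module declared with num_embeddings=1024 actually allocates 1026 rows.
# >>> emb = BartLearnedPositionalEmbedding(1024, 16)
# >>> emb.weight.shape
# torch.Size([1026, 16])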
class BartAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
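# Shape sketch (illustrative sizes): the output keeps the input shape, and for
# a decoder the returned cache holds (bsz, num_heads, seq_len, head_dim) keys
# and values.
# >>> attn = BartAttention(embed_dim=8, num_heads=2, is_decoder=True)
# >>> out, _, past = attn(torch.randn(2, 5, 8))
# >>> out.shape, past[0].shape
# (torch.Size([2, 5, 8]), torch.Size([2, 2, 5, 4]))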
class BartEncoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BartAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class BartDecoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn_name = config.activation_function
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = BartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
self.fused = False
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (:obj:`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
# todo: replace this with fused function
if self.fused and hidden_states.is_cuda:
with autocast(enabled=False):
weights = [self.fc1.weight.half(), self.fc2.weight.half()]
biases = [self.fc1.bias.half(), self.fc2.bias.half()]
seq_len, bsz, hidden_size = hidden_states.size(0), hidden_states.size(1), hidden_states.size(2)
dropout = self.activation_dropout if self.training else 0.0
hidden_states = self.fused_function(dropout, False, hidden_states.half().view(seq_len * bsz, -1),
*weights, *biases).type_as(hidden_states)
hidden_states = hidden_states.view(seq_len, bsz, hidden_size)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
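# Usage sketch (illustrative sizes): maps one pooled vector per sequence to
# num_classes logits.
# >>> head = BartClassificationHead(input_dim=8, inner_dim=8, num_classes=3, pooler_dropout=0.0)
# >>> head(torch.randn(4, 8)).shape
# torch.Size([4, 3])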
class BartPretrainedModel(PreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (BartDecoder, BartEncoder)):
module.gradient_checkpointing = value
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
class PretrainedBartModel(BartPretrainedModel):
def __init_subclass__(self):
warnings.warn(
"The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.",
FutureWarning,
)
BART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BartConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
Mask filling example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
Bart uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no
:obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
the right for denoising pre-training following the paper.
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
            If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_attention_mask`
            and modify it to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
            information on the default strategy.
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
            Tuple consisting of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
            :obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`
            is a sequence of hidden-states at the output of the last layer of the encoder, used in the
            cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class BartEncoder(BartPretrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`BartEncoderLayer`.
Args:
config: BartConfig
        embed_tokens (nn.Embedding): input token embedding (optionally shared with the decoder)
"""
def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = BartLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.init_weights()
self.gradient_checkpointing = False
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
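# --- Illustrative sketch (not used by the model) ------------------------------
# The loop above implements LayerDrop (https://arxiv.org/abs/1909.11556):
# during training, each layer is skipped wholesale with probability
# ``self.layerdrop``. Stripped of masks and caching, the pattern is simply:
def _sketch_layerdrop_forward(layers, hidden_states, layerdrop, training):
    """Hypothetical helper showing the LayerDrop skip pattern in isolation."""
    for layer in layers:
        if training and random.uniform(0, 1) < layerdrop:
            continue  # this layer contributes nothing on this forward pass
        hidden_states = layer(hidden_states)
    return hidden_states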
class BartDecoder(BartPretrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer`
Args:
config: BartConfig
        embed_tokens (nn.Embedding): input token embedding (optionally shared with the encoder)
"""
    def __init__(self, config: BartConfig, opt=None, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        # `opt` carries the NMTGMinor training options; fall back to the HF
        # config values when the decoder is built without them (e.g. by BartModel).
        self.dropout = opt.dropout if opt is not None else config.dropout
        self.layerdrop = opt.death_rate / 2 if opt is not None else config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.dec_pretrained_model = 'bart'
        if opt is not None:
            config.attention_dropout = opt.attn_dropout if opt.attn_dropout > 0 else opt.dropout
            config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = BartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.init_weights()
self.gradient_checkpointing = False
self.model_size = config.d_model
self.switchout = 0.0
self.config.bert_hidden_size = config.d_model
@property
def word_lut(self):
return self.embed_tokens
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
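    # Illustration (comment only): ``_make_causal_mask``, defined elsewhere in
    # this module, builds the additive "don't look ahead" mask combined above.
    # For tgt_len=3 each batch row looks like
    #     [[0, -inf, -inf],
    #      [0,    0, -inf],
    #      [0,    0,    0]]
    # i.e. position i may attend to positions <= i. With a cache, an extra
    # ``past_key_values_length`` leading columns of zeros are prepended, so all
    # cached positions stay visible.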
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
                Mask to avoid performing cross-attention on padding token indices of the encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2
tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
                assert attn_mask.size()[0] == (
                    len(self.layers)
                ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context.transpose(0, 1)
# buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
# decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
# input_ids = decoder_state.input_seq.transpose(0, 1) # T x B -> B x T
input_ids = input
input_shape = input_ids.size()
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # TODO: add buffering (past key/value caching) support here
past_key_values_length = 0
attention_mask = input_ids.ne(onmt.constants.TGT_PAD).long()
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
encoder_attention_mask = decoder_state.src_mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
# decoder layers
all_cross_attentions = ()
head_mask, cross_attn_head_mask = None, None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
                assert attn_mask.size()[0] == (
                    len(self.layers)
                ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# past_key_value = past_key_values[idx] if past_key_values is not None else None
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=None,
output_attentions=True,
use_cache=False,
)
hidden_states = layer_outputs[0]
all_cross_attentions += (layer_outputs[2],)
output = hidden_states.transpose(0, 1).contiguous()[-1].unsqueeze(0)
        all_cross_attentions = ()  # the collected maps are discarded here; coverage is returned empty
# if use_cache:
# next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
#
# if output_attentions:
# all_self_attns += (layer_outputs[1],)
#
coverage = all_cross_attentions
# raise NotImplementedError
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states.transpose(0, 1)
return output_dict
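# --- Illustrative usage sketch (hypothetical, not called anywhere) -------------
# With ``use_cache=True`` the decoder's forward returns ``past_key_values``;
# later steps then only need the newest token. ``lm_head`` stands in for an
# output projection that this file does not define.
def _sketch_greedy_decode(decoder, encoder_hidden_states, bos_ids, lm_head, max_len=10):
    """Greedy decoding with the key/value cache, one token per forward call."""
    past, next_ids, generated = None, bos_ids, [bos_ids]
    for _ in range(max_len):
        out = decoder(input_ids=next_ids,
                      encoder_hidden_states=encoder_hidden_states,
                      past_key_values=past,
                      use_cache=True,
                      return_dict=True)
        past = out.past_key_values
        next_ids = lm_head(out.last_hidden_state[:, -1:, :]).argmax(dim=-1)
        generated.append(next_ids)
    return torch.cat(generated, dim=1)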
# @add_start_docstrings(
# "The bare BART Model outputting raw hidden-states without any specific head on top.",
# BART_START_DOCSTRING,
# )
class BartModel(BartPretrainedModel):
def __init__(self, config: BartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BartEncoder(config, self.shared)
        self.decoder = BartDecoder(config, embed_tokens=self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
        # Unlike other models, Bart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided.
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
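# --- Illustrative sketch (not used by the model) ------------------------------
# ``shift_tokens_right`` (defined elsewhere in this module) prepends the
# decoder start token and drops the last position, e.g. with
# decoder_start_token_id=2: [[5, 6, 7]] -> [[2, 5, 6]]. A minimal version,
# mirroring that convention:
def _sketch_shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # labels may use -100 as ignore-padding; map it back to the real pad id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted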
#
# @add_start_docstrings(
# "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING
# )
# class BartForConditionalGeneration(BartPretrainedModel):
# base_model_prefix = "model"
# _keys_to_ignore_on_load_missing = [r"final_logits_bias", r"lm_head\.weight"]
#
# def __init__(self, config: BartConfig):
# super().__init__(config)
# self.model = BartModel(config)
# self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
# self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
#
# self.init_weights()
#
# def get_encoder(self):
# return self.model.get_encoder()
#
# def get_decoder(self):
# return self.model.get_decoder()
#
# def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
# new_embeddings = super().resize_token_embeddings(new_num_tokens)
# self._resize_final_logits_bias(new_num_tokens)
# return new_embeddings
#
# def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
# old_num_tokens = self.final_logits_bias.shape[-1]
# if new_num_tokens <= old_num_tokens:
# new_bias = self.final_logits_bias[:, :new_num_tokens]
# else:
# extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
# new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
# self.register_buffer("final_logits_bias", new_bias)
#
# def get_output_embeddings(self):
# return self.lm_head
#
# def set_output_embeddings(self, new_embeddings):
# self.lm_head = new_embeddings
#
# @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
# @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
# @add_end_docstrings(BART_GENERATION_EXAMPLE)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# decoder_input_ids=None,
# decoder_attention_mask=None,
# head_mask=None,
# decoder_head_mask=None,
# cross_attn_head_mask=None,
# encoder_outputs=None,
# past_key_values=None,
# inputs_embeds=None,
# decoder_inputs_embeds=None,
# labels=None,
# use_cache=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
# ):
# r"""
# labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
# Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
# config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
# (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
#
# Returns:
# """
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
#
# if labels is not None:
# if decoder_input_ids is None and decoder_inputs_embeds is None:
# decoder_input_ids = shift_tokens_right(
# labels, self.config.pad_token_id, self.config.decoder_start_token_id
# )
#
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# encoder_outputs=encoder_outputs,
# decoder_attention_mask=decoder_attention_mask,
# head_mask=head_mask,
# decoder_head_mask=decoder_head_mask,
# cross_attn_head_mask=cross_attn_head_mask,
# past_key_values=past_key_values,
# inputs_embeds=inputs_embeds,
# decoder_inputs_embeds=decoder_inputs_embeds,
# use_cache=use_cache,
# output_attentions=output_attentions,
# output_hidden_states=output_hidden_states,
# return_dict=return_dict,
# )
# lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
#
# masked_lm_loss = None
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
#
# if not return_dict:
# output = (lm_logits,) + outputs[1:]
# return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
#
# return Seq2SeqLMOutput(
# loss=masked_lm_loss,
# logits=lm_logits,
# past_key_values=outputs.past_key_values,
# decoder_hidden_states=outputs.decoder_hidden_states,
# decoder_attentions=outputs.decoder_attentions,
# cross_attentions=outputs.cross_attentions,
# encoder_last_hidden_state=outputs.encoder_last_hidden_state,
# encoder_hidden_states=outputs.encoder_hidden_states,
# encoder_attentions=outputs.encoder_attentions,
# )
#
# def prepare_inputs_for_generation(
# self,
# decoder_input_ids,
# past=None,
# attention_mask=None,
# head_mask=None,
# decoder_head_mask=None,
# cross_attn_head_mask=None,
# use_cache=None,
# encoder_outputs=None,
# **kwargs
# ):
# # cut decoder_input_ids if past is used
# if past is not None:
# decoder_input_ids = decoder_input_ids[:, -1:]
#
# return {
# "input_ids": None, # encoder_outputs is defined. input_ids not needed
# "encoder_outputs": encoder_outputs,
# "past_key_values": past,
# "decoder_input_ids": decoder_input_ids,
# "attention_mask": attention_mask,
# "head_mask": head_mask,
# "decoder_head_mask": decoder_head_mask,
# "cross_attn_head_mask": cross_attn_head_mask,
# "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
# }
#
# def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
# return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
#
# @staticmethod
# def _reorder_cache(past, beam_idx):
# reordered_past = ()
# for layer_past in past:
# # cached cross_attention states don't have to be reordered -> they are always the same
# reordered_past += (
# tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
# )
# return reordered_past
#
#
# @add_start_docstrings(
# """
# Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
# tasks.
# """,
# BART_START_DOCSTRING,
# )
# class BartForSequenceClassification(BartPretrainedModel):
# def __init__(self, config: BartConfig, **kwargs):
# super().__init__(config, **kwargs)
# self.model = BartModel(config)
# self.classification_head = BartClassificationHead(
# config.d_model,
# config.d_model,
# config.num_labels,
# config.classifier_dropout,
# )
# self.model._init_weights(self.classification_head.dense)
# self.model._init_weights(self.classification_head.out_proj)
#
# @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
# @add_code_sample_docstrings(
# tokenizer_class=_TOKENIZER_FOR_DOC,
# checkpoint=_CHECKPOINT_FOR_DOC,
# output_type=Seq2SeqSequenceClassifierOutput,
# config_class=_CONFIG_FOR_DOC,
# )
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# decoder_input_ids=None,
# decoder_attention_mask=None,
# head_mask=None,
# decoder_head_mask=None,
# cross_attn_head_mask=None,
# encoder_outputs=None,
# inputs_embeds=None,
# decoder_inputs_embeds=None,
# labels=None,
# use_cache=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
# ):
# r"""
# labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
# Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
# config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
# """
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# if labels is not None:
# use_cache = False
#
# if input_ids is None and inputs_embeds is not None:
# raise NotImplementedError(
# f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
# )
#
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# decoder_attention_mask=decoder_attention_mask,
# head_mask=head_mask,
# decoder_head_mask=decoder_head_mask,
# cross_attn_head_mask=cross_attn_head_mask,
# encoder_outputs=encoder_outputs,
# inputs_embeds=inputs_embeds,
# decoder_inputs_embeds=decoder_inputs_embeds,
# use_cache=use_cache,
# output_attentions=output_attentions,
# output_hidden_states=output_hidden_states,
# return_dict=return_dict,
# )
# hidden_states = outputs[0] # last hidden state
#
# eos_mask = input_ids.eq(self.config.eos_token_id)
#
# if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
# raise ValueError("All examples must have the same number of <eos> tokens.")
# sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
# :, -1, :
# ]
# logits = self.classification_head(sentence_representation)
#
# loss = None
# if labels is not None:
# if self.config.num_labels == 1:
# # regression
# loss_fct = MSELoss()
# loss = loss_fct(logits.view(-1), labels.view(-1))
# else:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
#
# if not return_dict:
# output = (logits,) + outputs[1:]
# return ((loss,) + output) if loss is not None else output
#
# return Seq2SeqSequenceClassifierOutput(
# loss=loss,
# logits=logits,
# past_key_values=outputs.past_key_values,
# decoder_hidden_states=outputs.decoder_hidden_states,
# decoder_attentions=outputs.decoder_attentions,
# cross_attentions=outputs.cross_attentions,
# encoder_last_hidden_state=outputs.encoder_last_hidden_state,
# encoder_hidden_states=outputs.encoder_hidden_states,
# encoder_attentions=outputs.encoder_attentions,
# )
#
#
# @add_start_docstrings(
# """
# BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
# layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
# """,
# BART_START_DOCSTRING,
# )
# class BartForQuestionAnswering(BartPretrainedModel):
# def __init__(self, config):
# super().__init__(config)
#
# config.num_labels = 2
# self.num_labels = config.num_labels
#
# self.model = BartModel(config)
# self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
#
# self.model._init_weights(self.qa_outputs)
#
# @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
# @add_code_sample_docstrings(
# tokenizer_class=_TOKENIZER_FOR_DOC,
# checkpoint=_CHECKPOINT_FOR_DOC,
# output_type=Seq2SeqQuestionAnsweringModelOutput,
# config_class=_CONFIG_FOR_DOC,
# )
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# decoder_input_ids=None,
# decoder_attention_mask=None,
# head_mask=None,
# decoder_head_mask=None,
# cross_attn_head_mask=None,
# encoder_outputs=None,
# start_positions=None,
# end_positions=None,
# inputs_embeds=None,
# decoder_inputs_embeds=None,
# use_cache=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
# ):
# r"""
# start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
# Labels for position (index) of the start of the labelled span for computing the token classification loss.
# Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
# are not taken into account for computing the loss.
# end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
# Labels for position (index) of the end of the labelled span for computing the token classification loss.
# Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
# are not taken into account for computing the loss.
# """
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# if start_positions is not None and end_positions is not None:
# use_cache = False
#
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# decoder_attention_mask=decoder_attention_mask,
# head_mask=head_mask,
# decoder_head_mask=decoder_head_mask,
# cross_attn_head_mask=cross_attn_head_mask,
# encoder_outputs=encoder_outputs,
# inputs_embeds=inputs_embeds,
# decoder_inputs_embeds=decoder_inputs_embeds,
# use_cache=use_cache,
# output_attentions=output_attentions,
# output_hidden_states=output_hidden_states,
# return_dict=return_dict,
# )
#
# sequence_output = outputs[0]
#
# logits = self.qa_outputs(sequence_output)
# start_logits, end_logits = logits.split(1, dim=-1)
# start_logits = start_logits.squeeze(-1).contiguous()
# end_logits = end_logits.squeeze(-1).contiguous()
#
# total_loss = None
# if start_positions is not None and end_positions is not None:
# # If we are on multi-GPU, split add a dimension
# if len(start_positions.size()) > 1:
# start_positions = start_positions.squeeze(-1)
# if len(end_positions.size()) > 1:
# end_positions = end_positions.squeeze(-1)
# # sometimes the start/end positions are outside our model inputs, we ignore these terms
# ignored_index = start_logits.size(1)
# start_positions = start_positions.clamp(0, ignored_index)
# end_positions = end_positions.clamp(0, ignored_index)
#
# loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
# start_loss = loss_fct(start_logits, start_positions)
# end_loss = loss_fct(end_logits, end_positions)
# total_loss = (start_loss + end_loss) / 2
#
# if not return_dict:
# output = (
# start_logits,
# end_logits,
# ) + outputs[1:]
# return ((total_loss,) + output) if total_loss is not None else output
#
# return Seq2SeqQuestionAnsweringModelOutput(
# loss=total_loss,
# start_logits=start_logits,
# end_logits=end_logits,
# past_key_values=outputs.past_key_values,
# decoder_hidden_states=outputs.decoder_hidden_states,
# decoder_attentions=outputs.decoder_attentions,
# cross_attentions=outputs.cross_attentions,
# encoder_last_hidden_state=outputs.encoder_last_hidden_state,
# encoder_hidden_states=outputs.encoder_hidden_states,
# encoder_attentions=outputs.encoder_attentions,
# )
#
#
# class BartDecoderWrapper(BartPretrainedModel):
# """
# This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
# used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
# """
#
# def __init__(self, config):
# super().__init__(config)
# self.decoder = BartDecoder(config)
#
# def forward(self, *args, **kwargs):
# return self.decoder(*args, **kwargs)
#
#
# class BartForCausalLM(BartPretrainedModel):
# def __init__(self, config):
# super().__init__(config)
# config = copy.deepcopy(config)
# config.is_decoder = True
# config.is_encoder_decoder = False
# self.model = BartDecoderWrapper(config)
#
# self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
#
# self.init_weights()
#
# def get_input_embeddings(self):
# return self.model.decoder.embed_tokens
#
# def set_input_embeddings(self, value):
# self.model.decoder.embed_tokens = value
#
# def get_output_embeddings(self):
# return self.lm_head
#
# def set_output_embeddings(self, new_embeddings):
# self.lm_head = new_embeddings
#
# def set_decoder(self, decoder):
# self.model.decoder = decoder
#
# def get_decoder(self):
# return self.model.decoder
#
# @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# encoder_hidden_states=None,
# encoder_attention_mask=None,
# head_mask=None,
# cross_attn_head_mask=None,
# past_key_values=None,
# inputs_embeds=None,
# labels=None,
# use_cache=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
# ):
# r"""
# Args:
# input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
# Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
# provide it.
#
# Indices can be obtained using :class:`~transformers.BartTokenizer`. See
# :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
# for details.
#
# `What are input IDs? <../glossary.html#input-ids>`__
# attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
# Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
#
# - 1 for tokens that are **not masked**,
# - 0 for tokens that are **masked**.
#
# `What are attention masks? <../glossary.html#attention-mask>`__
# encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
# Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
# if the model is configured as a decoder.
# encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
# Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
# in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
# head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
# Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
#
# - 1 indicates the head is **not masked**,
# - 0 indicates the head is **masked**.
#
# cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
# Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
#
# - 1 indicates the head is **not masked**,
# - 0 indicates the head is **masked**.
#
# past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
# Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2
# tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
# tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two
# additional tensors are only required when the model is used as a decoder in a Sequence to Sequence
# model.
#
# Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
# cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential
# decoding.
#
# If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
# (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
# instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
# labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
# Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
# config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
# ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
# config.vocab_size]``.
# use_cache (:obj:`bool`, `optional`):
# If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
# decoding (see :obj:`past_key_values`).
#
# - 1 for tokens that are **not masked**,
# - 0 for tokens that are **masked**.
# output_attentions (:obj:`bool`, `optional`):
# Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
# returned tensors for more detail.
# output_hidden_states (:obj:`bool`, `optional`):
# Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
# for more detail.
# return_dict (:obj:`bool`, `optional`):
# Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
#
# Returns:
#
# Example::
#
# >>> from transformers import BartTokenizer, BartForCausalLM
#
# >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
# >>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
# >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
# >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# >>> outputs = model(**inputs)
#
# >>> last_hidden_states = outputs.last_hidden_state
# """
#
# output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
# output_hidden_states = (
# output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
# )
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
#
# # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
# outputs = self.model.decoder(
# input_ids=input_ids,
# attention_mask=attention_mask,
# encoder_hidden_states=encoder_hidden_states,
# encoder_attention_mask=encoder_attention_mask,
# head_mask=head_mask,
# cross_attn_head_mask=cross_attn_head_mask,
# past_key_values=past_key_values,
# inputs_embeds=inputs_embeds,
# use_cache=use_cache,
# output_attentions=output_attentions,
# output_hidden_states=output_hidden_states,
# return_dict=return_dict,
# )
#
# logits = self.lm_head(outputs[0])
#
# loss = None
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
#
# if not return_dict:
# output = (logits,) + outputs[1:]
# return (loss,) + output if loss is not None else output
#
# return CausalLMOutputWithCrossAttentions(
# loss=loss,
# logits=logits,
# past_key_values=outputs.past_key_values,
# hidden_states=outputs.hidden_states,
# attentions=outputs.attentions,
# cross_attentions=outputs.cross_attentions,
# )
#
# def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
# if attention_mask is None:
# attention_mask = input_ids.new_ones(input_ids.shape)
#
# if past:
# input_ids = input_ids[:, -1:]
# # first step, decoder_cached_states are empty
# return {
# "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
# "attention_mask": attention_mask,
# "past_key_values": past,
# "use_cache": use_cache,
# }
#
# @staticmethod
# def _reorder_cache(past, beam_idx):
# reordered_past = ()
# for layer_past in past:
# reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
# return reordered_past
| 90,013
| 45.327329
| 161
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/file_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from dataclasses import fields
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
import requests
# from filelock import FileLock
from tqdm.auto import tqdm
# from . import __version__
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
import tensorflow as tf
assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
try:
import torch_xla.core.xla_model as xm # noqa: F401
if _torch_available:
        _torch_tpu_available = True  # pylint: disable=invalid-name
else:
_torch_tpu_available = False
except ImportError:
_torch_tpu_available = False
try:
import psutil # noqa: F401
_psutil_available = True
except ImportError:
_psutil_available = False
try:
import py3nvml # noqa: F401
_py3nvml_available = True
except ImportError:
_py3nvml_available = False
try:
from apex import amp # noqa: F401
_has_apex = True
except ImportError:
_has_apex = False
default_cache_path = os.path.join(torch_cache_home, "transformers")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
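# Cache directory resolution order (most specific environment variable wins):
# TRANSFORMERS_CACHE > PYTORCH_TRANSFORMERS_CACHE > PYTORCH_PRETRAINED_BERT_CACHE
# > <torch cache home>/transformers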
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def is_torch_tpu_available():
return _torch_tpu_available
def is_psutil_available():
return _psutil_available
def is_py3nvml_available():
return _py3nvml_available
def is_apex_available():
return _has_apex
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
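# Illustrative usage (hypothetical function name): the decorator prepends the
# shared documentation to the function's own docstring.
@add_start_docstrings("Shared intro paragraph.\n\n")
def _sketch_documented_fn():
    """Function-specific details."""
# _sketch_documented_fn.__doc__ is now "Shared intro paragraph.\n\nFunction-specific details."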
def add_start_docstrings_to_callable(*docstr):
def docstring_decorator(fn):
class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
note = r"""
.. note::
        Although the recipe for the forward pass needs to be defined within
        this function, one should call the :class:`Module` instance afterwards
        instead of this, since the former takes care of running the
        pre- and post-processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
        fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
return fn
return docstring_decorator
RETURN_INTRODUCTION = r"""
Returns:
:class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`:
A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a
tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration
(:class:`~transformers.{config_class}`) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_arg_doc in blocks argument/description
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
        # If the indent is the same as the beginning, the line is the name of a new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces from the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class):
"""
Prepares the return part of the docstring using `output_type`.
"""
docstrings = output_type.__doc__
# Remove the head of the docstring to keep the list of args only
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
docstrings = "\n".join(lines[(i + 1) :])
docstrings = _convert_output_args_doc(docstrings)
# Add the return introduction
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = RETURN_INTRODUCTION.format(full_output_type=full_output_type, config_class=config_class)
return intro + docstrings
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
    >>> start_scores = outputs.start_logits
    >>> end_scores = outputs.end_logits
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"]
>>> outputs = model(input_ids, labels=input_ids)
>>> loss = outputs.loss
>>> prediction_logits = outputs.logits
"""
PT_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example::
>>> import torch
>>> from transformers import {tokenizer_class}, {model_class}
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> input_ids = inputs["input_ids"]
>>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1
>>> outputs = model(inputs)
>>> loss, scores = outputs[:2]
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> input_dict = tokenizer(question, text, return_tensors='tf')
>>> start_scores, end_scores = model(input_dict)
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
>>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1])
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
>>> outputs = model(inputs)
>>> loss, logits = outputs[:2]
"""
TF_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores = outputs[0]
"""
TF_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs[0]
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs[0]
"""
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None):
def docstring_decorator(fn):
model_class = fn.__qualname__.split(".")[0]
is_tf_class = model_class[:2] == "TF"
if "SequenceClassification" in model_class:
code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
elif "QuestionAnswering" in model_class:
code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
elif "TokenClassification" in model_class:
code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
elif "MultipleChoice" in model_class:
code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
elif "MaskedLM" in model_class:
code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
elif "LMHead" in model_class:
code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
elif "Model" in model_class:
code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else ""
built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc
return fn
return docstring_decorator
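# Hedged usage sketch -- the class and checkpoint below are placeholders, not
# part of this module; the "MaskedLM" substring in the class name is what
# selects PT_MASKED_LM_SAMPLE:
#
# >>> class ToyForMaskedLM:
# ...     @add_code_sample_docstrings(tokenizer_class="BertTokenizer", checkpoint="bert-base-uncased")
# ...     def forward(self):
# ...         """Runs the model."""
# >>> "bert-base-uncased" in ToyForMaskedLM.forward.__doc__
# True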
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
docstrings = fn.__doc__
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
lines[i] = _prepare_output_docstrings(output_type, config_class)
docstrings = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}"
)
fn.__doc__ = docstrings
return fn
return docstring_decorator
def is_tensor(x):
""" Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. """
if is_torch_available():
import torch
if isinstance(x, torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(x, tf.Tensor):
return True
return isinstance(x, np.ndarray)
class ModelOutput(OrderedDict):
"""
    Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the ``None`` attributes. Otherwise behaves like a
    regular python dictionary.
    .. warning::
        You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple`
        method to convert it to a tuple beforehand.
"""
def __post_init__(self):
class_fields = fields(self)
# Safety and consistency checks
assert len(class_fields), f"{self.__class__.__name__} has no fields."
assert all(
field.default is None for field in class_fields[1:]
), f"{self.__class__.__name__} should not have more than one required field."
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(first_field):
try:
iterator = iter(first_field)
first_field_iterator = True
except TypeError:
first_field_iterator = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for element in iterator:
if (
not isinstance(element, (list, tuple))
or not len(element) == 2
or not isinstance(element[0], str)
):
break
setattr(self, element[0], element[1])
if element[1] is not None:
self[element[0]] = element[1]
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
def __delitem__(self, *args, **kwargs):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def setdefault(self, *args, **kwargs):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def pop(self, *args, **kwargs):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
def to_tuple(self) -> Tuple[Any]:
"""
Convert self to a tuple containing all the attributes/keys that are not ``None``.
"""
return tuple(self[k] for k in self.keys())
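# A minimal sketch of a concrete output class (`ToyOutput` is illustrative
# only, not part of this module):
#
# >>> import torch
# >>> from dataclasses import dataclass
# >>> from typing import Optional
# >>> @dataclass
# ... class ToyOutput(ModelOutput):
# ...     loss: Optional[torch.FloatTensor] = None
# ...     logits: torch.FloatTensor = None
# >>> out = ToyOutput(logits=torch.ones(2, 2))
# >>> out["logits"] is out.logits is out[0]  # loss is None, so it is skipped
# True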
| 22,028
| 37.512238
| 150
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/activations.py
|
import logging
import math
import torch
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def swish(x):
return x * torch.sigmoid(x)
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
This is now written in C in torch.nn.functional
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
""" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
# note: parse the version instead of comparing strings, which would wrongly
# order e.g. "1.10.0" before "1.4.0"
if tuple(int(p) for p in torch.__version__.split("+")[0].split(".")[:2]) < (1, 4):
    gelu = _gelu_python
else:
    gelu = F.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
ACT2FN = {
"relu": F.relu,
"swish": swish,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
}
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
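# Usage sketch: look an activation up once and reuse it; unknown names raise
# a KeyError listing the available choices.
#
# >>> import torch
# >>> act = get_activation("gelu_new")
# >>> bool(torch.allclose(act(torch.zeros(3)), torch.zeros(3)))
# True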
| 1,536
| 26.446429
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/configuration_roberta.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RoBERTa configuration """
from .configuration_bert import BertConfig
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json",
"roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json",
"roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json",
"distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-config.json",
"roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-config.json",
"roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-config.json",
}
class RobertaConfig(BertConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.RobertaModel`.
It is used to instantiate an RoBERTa model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
The :class:`~transformers.RobertaConfig` class directly inherits :class:`~transformers.BertConfig`.
It reuses the same defaults. Please check the parent class for more information.
    Example::

        >>> # Initializing a RoBERTa configuration with default values
        >>> configuration = RobertaConfig()

        >>> # Accessing an inherited default (RoBERTa uses pad id 1)
        >>> configuration.pad_token_id
        1
    """
model_type = "roberta"
def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
"""Constructs RobertaConfig.
"""
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 2,728
| 54.693878
| 133
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/modeling_whisper.py
|
import copy
import math
import random
from typing import Optional, Tuple, Any, Dict, List, Union
import torch
import torch.utils.checkpoint
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
import numpy as np
from torch.nn import CrossEntropyLoss, MSELoss
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.self_attention_func import self_attn_func
from onmt.modules.optimized.encdec_attention_func_bias import encdec_attn_bias_func
from onmt.modules.dropout import embedded_dropout
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import linear_function
from torch.cuda.amp import custom_fwd, custom_bwd
from onmt.models.speech_recognizer.fairseq_wav2vec2.fairseq_modules import index_copy
from .activations import ACT2FN
from .modeling_outputs import (
BaseModelOutput,
)
from .modeling_utils import PreTrainedModel
# from ...utils import logging
# from .configuration_bart import BartConfig
import onmt
from collections import defaultdict
from .configuration_whisper import WhisperConfig
from .modeling_mbart import MBartAttention
from .modeling_mbart import MBartCrossAttention
from .modeling_mbart import MBartEncoderLayer as WhisperEncoderLayer
from .modeling_mbart import MBartDecoderLayer as WhisperDecoderLayer
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
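# Shape-only usage sketch (the returned mask is random, so values vary):
#
# >>> mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.05, mask_length=10)
# >>> mask.shape, mask.dtype
# ((2, 100), dtype('bool'))
#
# Roughly `mask_prob` of each row ends up covered by spans of `mask_length` steps.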
class WhisperPositionalEmbedding(nn.Embedding):
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__(num_positions, embedding_dim)
def forward(self, input_ids, past_key_values_length=0):
return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[-1]]
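# Sketch: this embedding ignores the token values and simply slices its weight
# by position, so only the input length (and past length) matters:
#
# >>> import torch
# >>> emb = WhisperPositionalEmbedding(448, 4)
# >>> emb(torch.zeros(1, 3, dtype=torch.long)).shape
# torch.Size([3, 4])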
class WhisperPreTrainedModel(PreTrainedModel):
config_class = WhisperConfig
base_model_prefix = "model"
main_input_name = "input_features"
supports_gradient_checkpointing = True
_no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, (nn.Linear, nn.Conv1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (WhisperDecoder, WhisperEncoder)):
module.gradient_checkpointing = value
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
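# Length sketch: only conv2 (stride 2) shrinks the time axis, so T input
# frames map to (T - 1) // 2 + 1 output frames:
#
# >>> import torch
# >>> WhisperPreTrainedModel._get_feat_extract_output_lengths(None, torch.tensor([3000, 1500]))
# tensor([1500,  750])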
class WhisperEncoder(WhisperPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`WhisperEncoderLayer`].
Args:
config: WhisperConfig
"""
def __init__(self, config: WhisperConfig):
super().__init__(config)
self.config = config
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.num_mel_bins = config.num_mel_bins
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_source_positions
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)
self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
def _mask_input_features(
self,
input_features: torch.FloatTensor,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
# B x T x H -> B x H x T
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return input_features
# generate indices & apply SpecAugment along time axis
batch_size, hidden_size, sequence_length = input_features.size()
if self.config.mask_time_prob > 0 and self.training:
# generate indices & apply SpecAugment along time axis
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool)
mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1)
input_features[mask_time_indices] = 0
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool)
input_features[mask_feature_indices] = 0
return input_features
def forward(
self,
input_features,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
`numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
`input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
            attention_mask (`torch.Tensor`, *optional*):
                Whisper does not support masking of the `input_features`; this argument is kept for compatibility
                but is not used. By default, the silence in the input log mel spectrogram is ignored.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
        # The inputs have been constructed so that the first feature channel carries the padding mask:
        # 0 for positions that are not masked, 1 for positions that are masked
with torch.no_grad():
long_mask = input_features.narrow(2, 0, 1).squeeze(2).eq(0).long()
            input_features = input_features.narrow(2, 1, input_features.size(2) - 1)
attention_mask = long_mask
# [ B x H x T ] -> [ B x T x H ]
input_features = input_features.permute(0, 2, 1)
# apply spectral augmentation
input_features = self._mask_input_features(input_features, attention_mask=attention_mask)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# downsampling stuffs
inputs_embeds = nn.functional.gelu(self.conv1(input_features))
# remove the diluted values in the input
        inputs_embeds.masked_fill_(attention_mask.unsqueeze(1).bool(), 0)
        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
        # recompute the attention mask: conv2 has stride 2, so its output frames
        # are centered on the even input positions
        attention_mask = attention_mask[:, 0::2]
        # remove the diluted values in the input
        inputs_embeds.masked_fill_(attention_mask.unsqueeze(1).bool(), 0)
inputs_embeds = inputs_embeds.permute(0, 2, 1)
# find position encodings
bsz, seq_len = inputs_embeds.size(0), inputs_embeds.size(1)
positions = torch.arange(
0, seq_len, dtype=torch.long, device=inputs_embeds.device
        ).clamp_(max=self.max_source_positions - 1)
embed_pos = self.embed_positions(positions)
hidden_states = inputs_embeds + embed_pos.unsqueeze(0)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# # check if head_mask has a correct number of layers specified if desired
# if head_mask is not None:
# assert head_mask.size()[0] == (
# len(self.layers)
# ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
can_run_fast_bert_mha = False
# check if fast bert mha can be run
seq_len = hidden_states.size(1)
bsz = hidden_states.size(0)
sm = torch.cuda.get_device_capability()
total_bsz = 0
        if getattr(self, "fast_bert_mha", None) and torch.is_autocast_enabled():
can_run_fast_bert_mha = True
x = hidden_states
padding_mask = attention_mask # [B x T]
            # masked positions are 1, so sequence lengths are computed from (1 - mask)
if padding_mask is None:
padding_mask = x.new_zeros(bsz, seq_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
x = x.view(-1, x.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
hidden_states = x.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
else:
max_len = -1
cu_seqlens = None
non_pad_indices = None
if not can_run_fast_bert_mha:
# transpose from [B x T x H] to [T x B x H]
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
raise NotImplementedError
# def create_custom_forward(module):
# def custom_forward(*inputs):
# return module(*inputs, output_attentions)
#
# return custom_forward
#
# layer_outputs = torch.utils.checkpoint.checkpoint(
# create_custom_forward(encoder_layer),
# hidden_states,
# attention_mask,
# (head_mask[idx] if head_mask is not None else None),
# )
else:
layer_outputs = encoder_layer(
# hidden_states,
# None,
# layer_head_mask=(head_mask[idx] if head_mask is not None else None),
# output_attentions=output_attentions,
hidden_states,
attention_mask,
output_attentions=output_attentions,
max_len=max_len, cu_seqlens=cu_seqlens,
checkpointing_ffn=checkpointing_ffn
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
# if we remove padding before (for fast bert MHA) then remember to put padding back
# to restore the form B x T X H
if can_run_fast_bert_mha:
# remove the patch
# if x.size(0) > total_bsz:
# x = x[:total_bsz, :]
hidden_states = index_copy(hidden_states, non_pad_indices, bsz * seq_len)
hidden_states = hidden_states.view(bsz, seq_len, -1)
hidden_states = hidden_states.transpose(0, 1).contiguous()
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return tuple(v for v in [hidden_states, encoder_states, all_attentions, attention_mask] if v is not None)
class WhisperDecoder(WhisperPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`]
Args:
config: WhisperConfig
"""
def __init__(self, config: WhisperConfig, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_target_positions
self.max_source_positions = config.max_source_positions
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
# self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = WhisperPositionalEmbedding(self.max_target_positions, config.d_model)
self.layers = nn.ModuleList([WhisperDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
self.model_size = config.d_model
self.switchout = 0.0
# self.word_lut = self.embed_tokens
self.config.bert_hidden_size = config.d_model
self.layerdrop = opt.death_rate_decoder
self.dec_pretrained_model = 'mbart'
if opt.freeze_embedding:
self.embed_tokens.weight.requires_grad = False
self.word_dropout = opt.word_dropout
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # note: this helper is currently unused (forward() builds its own causal mask below) and
        # relies on `_make_causal_mask`/`_expand_mask`, which are not defined in this file
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
sub_encoder_hidden_states=None,
sub_encoder_attention_mask=None,
inputs_embeds=None,
incremental=False, incremental_cache=None,
lang=None, atb=None,
output_attentions=None,
output_hidden_states=None,
checkpointing_ffn=False,
checkpointing_cross_attn=False,
checkpointing_self_attn=False,
**kwargs):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
bsz = input_ids.size(0)
qlen = input_ids.size(1)
klen = qlen
# if attention_mask is None:
padding_mask = attention_mask
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
# embed positions
if input_ids is not None:
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
else:
positions = self.embed_positions(inputs_embeds, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = None  # incremental caching is handled in step(), not here
self.fast_bert_mha = None
if self.fast_bert_mha is not None and hidden_states.dtype == torch.half:
can_run_fast_bert_mha = True
# lets unpad both
if padding_mask is None:
padding_mask = input_ids.new_zeros(bsz, qlen)
padding_mask = padding_mask.contiguous().long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
hidden_states = hidden_states.index_select(0, non_pad_indices)
max_len = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=hidden_states.device)
non_pad_indices_q = non_pad_indices
# bsz, seq_len = hidden_states.size(0), hidden_states.size(1)
# lengths = [seq_len] * bsz
# a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
# cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=hidden_states.device)
# max_len = seq_len
# total_bsz = hidden_states.size(0)
# hidden_states = hidden_states.view(-1, hidden_states.size(-1))
# unpad the context
encoder_hidden_states = encoder_hidden_states.transpose(0, 1).contiguous()
padding_mask = encoder_attention_mask
if padding_mask is None:
context_len = encoder_hidden_states.size(1)
padding_mask = input_ids.new_zeros(bsz, context_len)
padding_mask = padding_mask.long()
lengths = (1 - padding_mask).sum(dim=1)
lengths = lengths.cpu().tolist() # list of lengths for B seqs
encoder_hidden_states = encoder_hidden_states.view(-1, encoder_hidden_states.size(-1))
non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
encoder_hidden_states = encoder_hidden_states.index_select(0, non_pad_indices)
max_len_kv = max(lengths)
# cumulative sequence lengths (required input for fmha)
a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
cu_seqlens_kv = torch.cumsum(a, 0).to(dtype=torch.int32, device=encoder_hidden_states.device)
else:
max_len, cu_seqlens = None, None
max_len_kv, cu_seqlens_kv = None, None
non_pad_indices_q = None
can_run_fast_bert_mha = False
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_outputs, _ = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
sub_encoder_hidden_states=sub_encoder_hidden_states,
sub_encoder_attention_mask=sub_encoder_attention_mask,
output_attentions=output_attentions,
lang=lang,
atb=atb,
checkpointing_ffn=checkpointing_ffn,
checkpointing_cross_attn=checkpointing_cross_attn,
checkpointing_self_attn=checkpointing_self_attn,
max_len=max_len, cu_seqlens=cu_seqlens,
max_len_kv=max_len_kv, cu_seqlens_kv=cu_seqlens_kv
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if can_run_fast_bert_mha:
seq_len = qlen
hidden_states = index_copy(hidden_states, non_pad_indices_q, bsz * seq_len)
hidden_states = hidden_states.view(bsz, seq_len, -1).transpose(0, 1).contiguous()
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return tuple(
v
            for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
atb = decoder_state.tgt_atb
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
input_ids = input
input_shape = input_ids.size()
time_step = input.size(1)
input_ = input
if buffering:
# use the last value of input to continue decoding
if input.size(1) > 1 and len(buffers) > 0:
                # if the buffers have not been initialized and the input is longer than one token,
                # then this is a prefix decoding step
input_ = input[:, -1:]
past_key_values_length = input.size(1) - 1
else:
past_key_values_length = 0
else:
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_) * self.embed_scale
qlen = input_ids.size(1)
klen = qlen
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
if input.size(1) > 1 and len(buffers) > 0:
attention_mask = attention_mask[-1:, :]
encoder_attention_mask = decoder_state.src_mask
if not self.layers[0].encoder_attn.fast_attention:
raise NotImplementedError
else:
encoder_attention_mask = encoder_attention_mask.bool()
        # embed positions; note this takes the token ids rather than just the input size
positions = self.embed_positions(input_, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = hidden_states.transpose(0, 1)
        # note: unlike mBART, Whisper applies no embedding layer norm here (forward() does not either)
max_len = None
cu_seqlens = None
for idx, decoder_layer in enumerate(self.layers):
if buffering:
buffer = buffers[idx] if idx in buffers else None
else:
buffer = None
layer_outputs, buffer = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=None,
incremental=buffering, incremental_cache=buffer,
lang=lang, atb=atb,
max_len=max_len, cu_seqlens=cu_seqlens
)
if buffering:
decoder_state.update_attention_buffer(buffer, idx)
hidden_states = layer_outputs[0]
hidden_states = self.layer_norm(hidden_states)
output = hidden_states[-1].unsqueeze(0)
# just a fake coverage, at the moment coverage is not returned during step
coverage = hidden_states.new(hidden_states.size(1), 1, encoder_hidden_states.size(0)).zero_()
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states
return output_dict
class WhisperFeatureExtractor:
def __init__(self, config: WhisperConfig):
self.num_mel_bins = config.num_mel_bins
try:
self.sampling_rate = config.sampling_rate
except Exception:
self.sampling_rate = 16000
def __call__(
self,
raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
# truncation: bool = True,
# sampling_rate: Optional[int] = None,
do_normalize: Optional[bool] = None,
**kwargs,
):
from onmt.data.whisper_audio import get_mel_filters, fram_wave, \
np_extract_fbank_features, zero_mean_unit_var_norm
if do_normalize:
x = zero_mean_unit_var_norm(raw_speech)
else:
x = raw_speech
# padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
# padded_inputs["input_features"],
# attention_mask=padded_inputs["attention_mask"],
# padding_value=self.padding_value,
# )
# padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
| 35,008
| 40.876794
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/modeling_roberta.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import torch
import torch.nn as nn
import torch.nn.functional as F
from .configuration_roberta import RobertaConfig
from .modeling_bert import BertModel
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"distilroberta-base",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
# consistent with Fairseq
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config, **kwargs):
super().__init__()
self.padding_idx = config.pad_token_id
self.no_emb_offset = config.no_emb_offset # by default it is false, for example, for EN roberta
print("* emb_offset:", not self.no_emb_offset)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
self.max_position_id = config.max_position_embeddings
self.bert_word_dropout = config.bert_word_dropout
self.emb_dropout = nn.Dropout(config.bert_emb_dropout)
self.emb_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        # there is no position offset for the Zh pretrained model; we also set the Zh "roberta" model type to bert
        seq_length = input_ids.size(1)
        if self.no_emb_offset:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
if seq_length > self.max_position_id:
position_ids = torch.clamp(position_ids, 0, self.max_position_id - 1)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
else:
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
if seq_length > self.max_position_id:
position_ids = torch.clamp(position_ids, 0, self.max_position_id - 1)
position_embeddings = self.position_embeddings(position_ids)
if inputs_embeds is None:
embed = self.word_embeddings
if self.bert_word_dropout and self.training:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(
1 - self.bert_word_dropout). \
expand_as(embed.weight) / (1 - self.bert_word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
words_embeddings = F.embedding(
input_ids, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
embeddings = words_embeddings + position_embeddings
embeddings = self.emb_layernorm(embeddings)
embeddings = self.emb_dropout(embeddings)
return embeddings
def emb_step(self, tgt_len, input_ids, token_type_ids=None):
if self.no_emb_offset:
if tgt_len > self.max_position_id:
position_ids = torch.tensor(self.max_position_id-1, dtype=torch.long, device=input_ids.device)
else:
position_ids = torch.tensor(tgt_len-1, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
else:
# tgt_len + self.padding_idx = (tgt_len-1) + (self.padding_idx + 1)
            if tgt_len + self.padding_idx + 1 > self.max_position_id:
                position_ids = torch.tensor(self.max_position_id - 1, dtype=torch.long, device=input_ids.device)
else:
position_ids = torch.tensor(tgt_len + self.padding_idx, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
embed = self.word_embeddings
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
words_embeddings = F.embedding(
input_ids, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
# words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
embeddings = self.emb_layernorm(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
""" We are provided embeddings directly. We cannot infer which are padded so just generate
sequential position ids.
:param torch.Tensor inputs_embeds:
:return torch.Tensor:
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
class RobertaModel(BertModel):
"""
This class overrides :class:`~transformers.BertModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
def __init__(self, config,
bert_word_dropout=None,
bert_emb_dropout=None,
bert_atten_dropout=None,
bert_hidden_dropout=None,
bert_hidden_size=None,
is_decoder=False,
before_plm_output_ln=False,
gradient_checkpointing=False,
**kwargs,
):
super().__init__(config, bert_word_dropout,
bert_emb_dropout,
bert_atten_dropout,
bert_hidden_dropout,
bert_hidden_size,
is_decoder,
before_plm_output_ln,
gradient_checkpointing,
**kwargs
)
# replace the original bert embedding with roberta embedding
config.no_emb_offset = kwargs.pop('no_emb_offset', False) # by default it is false, for example, for EN roberta
roberta_embeddings = RobertaEmbeddings(config)
self.add_module('embeddings', roberta_embeddings)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def create_position_ids_from_input_ids(input_ids, padding_idx):
""" Replace non-padding symbols with their position numbers. Position numbers begin at
padding_idx+1. Padding symbols are ignored. This is modified from fairseq's
`utils.make_positions`.
:param torch.Tensor x:
:return torch.Tensor:
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
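# Worked example with padding_idx=1 (RoBERTa's pad id): non-pad tokens count
# up from padding_idx + 1, while pad positions keep padding_idx itself.
#
# >>> import torch
# >>> create_position_ids_from_input_ids(torch.tensor([[0, 5, 6, 1, 1]]), padding_idx=1)
# tensor([[2, 3, 4, 1, 1]])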
| 8,232
| 40.791878
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/mbart50_tokenizer.py
|
from transformers import MBart50TokenizerFast
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MultilingualBart50TokenizerFast(MBart50TokenizerFast):
"""
Construct a MBart50 tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
src_lang (:obj:`str`, `optional`):
A string representing the source language.
tgt_lang (:obj:`str`, `optional`):
A string representing the target language.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (:obj:`dict`, `optional`):
Will be passed to the ``SentencePieceProcessor.__init__()`` method. The `Python wrapper for SentencePiece
<https://github.com/google/sentencepiece/tree/master/python>`__ can be used, among other things, to set:
- ``enable_sampling``: Enable subword regularization.
- ``nbest_size``: Sampling parameters for unigram. Invalid for BPE-Dropout.
- ``nbest_size = {0,1}``: No sampling is performed.
- ``nbest_size > 1``: samples from the nbest_size results.
- ``nbest_size < 0``: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- ``alpha``: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples::
>>> from transformers import MBart50Tokenizer
>>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... labels = tokenizer(tgt_text, return_tensors="pt").input_ids
>>> # model(**model_inputs, labels=labels) should work
"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
| 5,098
| 58.988235
| 493
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/tokenization_mbart50.py
|
import os
from contextlib import contextmanager
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
import onmt.logging as logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN",
"hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP",
"nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ",
"bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN",
"mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN",
"th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
"""
Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
src_lang (`str`, *optional*):
A string representing the source language.
tgt_lang (`str`, *optional*):
A string representing the target language.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
        - `nbest_size < 0`: assuming that nbest_size is infinite, samples from all hypotheses (lattice)
          using the forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import MBart50Tokenizer
>>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... labels = tokenizer(tgt_text, return_tensors="pt").input_ids
>>> # model(**model_inputs, labels=labels) should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
src_lang=None,
tgt_lang=None,
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs
) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=src_lang,
tgt_lang=tgt_lang,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
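        # Worked example of the alignment above: spm assigns "," the id 3, so this
        # tokenizer maps it to 3 + fairseq_offset = 4, while "<s>"/"<pad>"/"</s>"/"<unk>"
        # resolve through fairseq_tokens_to_ids instead of the spm model.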
self.sp_model_size = len(self.sp_model)
self.lang_code_to_id = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
}
self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
self._src_lang = src_lang if src_lang is not None else "en_XX"
self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def vocab_size(self) -> int:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def get_vocab(self) -> Dict:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) in an id using the vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
return self.sp_model.decode(tokens)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
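    # Sketch: with prefix=[lang_code] and suffix=[eos], a single sequence of three
    # ids yields the mask [1, 0, 0, 0, 1]; a pair of lengths (3, 2) yields
    # [1, 0, 0, 0, 0, 0, 1].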
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `labels`: (for decoder) `[tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
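    # Sketch (hypothetical ids): with src_lang="en_XX" this returns
    #   [en_XX_id] + token_ids_0 + [eos_id]
    # matching the `[src_lang_code] X [eos]` format described above.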
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def prepare_seq2seq_batch(
self,
src_texts: List[str],
src_lang: str = "en_XX",
tgt_texts: Optional[List[str]] = None,
tgt_lang: str = "ro_RO",
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
@contextmanager
def as_target_tokenizer(self):
"""
        Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
        sequence-to-sequence models that need a slightly different processing for the labels.
"""
self.set_tgt_lang_special_tokens(self.tgt_lang)
yield
self.set_src_lang_special_tokens(self.src_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
self.cur_lang_code_id = self.lang_code_to_id[src_lang]
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
"""Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
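# Usage sketch (not part of the original file): after construction the prefix is the
# src_lang code and the suffix is [eos]; `as_target_tokenizer()` swaps the prefix to
# the tgt_lang code while encoding labels and restores the source setting afterwards.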
| 15,544
| 44.855457
| 217
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/tokenization_util_fast.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/file_utils.py
|
# Copyright 2020 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with the local dataset cache. Parts of this file is adapted from the AllenNLP library at
https://github.com/allenai/allennlp.
"""
import copy
import fnmatch
import functools
import importlib.util
import io
import json
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import types
from collections import OrderedDict, UserDict
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from functools import partial, wraps
from hashlib import sha256
from itertools import chain
from pathlib import Path
from types import ModuleType
from typing import Any, BinaryIO, ContextManager, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import uuid4
from zipfile import ZipFile, is_zipfile
import numpy as np
from packaging import version
from tqdm.auto import tqdm
import requests
from filelock import FileLock
# from huggingface_hub import HfFolder, Repository, create_repo, list_repo_files, whoami
# from transformers.utils.versions import importlib_metadata
# The two names below are used further down but were not imported in this excerpt;
# standard-library equivalents are substituted here.
try:
    import importlib.metadata as importlib_metadata  # Python >= 3.8
except ImportError:
    import importlib_metadata  # third-party backport for older Pythons
import logging
logger = logging.getLogger(__name__)
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logger.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
_datasets_available = importlib.util.find_spec("datasets") is not None
try:
# Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version
# AND checking it has an author field in the metadata that is HuggingFace.
_ = importlib_metadata.version("datasets")
_datasets_metadata = importlib_metadata.metadata("datasets")
if _datasets_metadata.get("author", "") != "HuggingFace Inc.":
_datasets_available = False
except importlib_metadata.PackageNotFoundError:
_datasets_available = False
| 2,795
| 33.95
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/tokenization_util_base.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (hosts all the
user-facing encoding methods), SpecialTokensMixin (hosts the special-tokens logic) and BatchEncoding (wraps the
output dictionary with special methods for the fast tokenizers).
"""
import copy
import json
import os
import re
import warnings
from collections import OrderedDict, UserDict
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from packaging import version
import requests
| 1,250
| 35.794118
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/huggingface_tokenizers_tbc/tokenization_utils.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see
tokenization_utils_fast.py
"""
import bisect
import itertools
import re
import unicodedata
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union, overload
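# NOTE (excerpt): the names used below -- logger, Trie, AddedToken, TextInput,
# PreTrainedTokenizerBase and _insert_one_token_to_ordered_list -- are defined or
# imported earlier in the original transformers source for this file; that part
# of the file is not reproduced here.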
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't have to handle
    the specific vocabulary-augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Added tokens - We store this for both slow and fast tokenizers
# until the serialization of Fast tokenizers is updated
self.added_tokens_encoder: Dict[str, int] = {}
self.added_tokens_decoder: Dict[int, str] = {}
self.unique_no_split_tokens: List[str] = []
self.tokens_trie = Trie()
self._decode_use_source_tokenizer = False
@property
def is_fast(self) -> bool:
return False
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
raise NotImplementedError
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int]`: The added tokens.
"""
return self.added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary.
Args:
new_tokens (`List[str]`or `List[tokenizers.AddedToken]`):
                Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by
                checking if the tokenizer assigns the index of the `unk_token` to it).
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
if not isinstance(token, str):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
if self.verbose:
logger.info(f"Adding {token} to the vocabulary")
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
        # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
if special_tokens:
if len(new_tokens) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, new_tokens[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens)))
else:
# Or on the newly added tokens
if len(tokens_to_add) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, tokens_to_add[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
self._create_trie(self.unique_no_split_tokens)
return len(tokens_to_add)
def _create_trie(self, unique_no_split_tokens):
trie = Trie()
for token in unique_no_split_tokens:
if hasattr(self, "do_lower_case") and self.do_lower_case and token not in self.all_special_tokens:
trie.add(token.lower())
else:
trie.add(token)
self.tokens_trie = trie
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not
put this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
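    # Sketch: for the MBart50 tokenizer above this returns 2 (language code + eos),
    # since build_inputs_with_special_tokens adds one prefix and one suffix token.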
def tokenize(self, text: TextInput, **kwargs) -> List[str]:
"""
Converts a string in a sequence of tokens, using the tokenizer.
        Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies
        (BPE/SentencePiece/WordPiece). Takes care of added tokens.
Args:
text (`str`):
The sequence to be encoded.
**kwargs (additional keyword arguments):
Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
Returns:
`List[str]`: The list of tokens.
"""
# Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
all_special_tokens_extended = dict(
(str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
)
text, kwargs = self.prepare_for_tokenization(text, **kwargs)
if kwargs:
logger.warning(f"Keyword arguments {kwargs} not recognized.")
# TODO: should this be in the base class?
if hasattr(self, "do_lower_case") and self.do_lower_case:
# convert non-special tokens to lowercase
escaped_special_toks = [
re.escape(s_tok) for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
no_split_token = set(self.unique_no_split_tokens)
tokens = self.tokens_trie.split(text)
# ["This is something", "<special_token_1>", " else"]
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = all_special_tokens_extended.get(token, None)
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
tokens[i + 1] = right.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip() # Opposite here
else:
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
# ["This is something", "<special_token_1>", "else"]
tokenized_text = []
for token in tokens:
            # Skip any empty (fully stripped) tokens
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token))
# ["This", " is", " something", "<special_token_1>", "else"]
return tokenized_text
def _tokenize(self, text, **kwargs):
"""
        Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
        vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
        Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using the
        vocabulary.
Args:
tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).
Returns:
`int` or `List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def prepare_for_tokenization(
self, text: str, is_split_into_words: bool = False, **kwargs
) -> Tuple[str, Dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (`str`):
The text to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs:
Keyword arguments to use for the tokenization.
Returns:
`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
...
@overload
def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
...
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
        Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary
        and added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return " ".join(tokens)
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
spaces_between_special_tokens: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
            # tokens here are strings, so compare against special tokens rather than ids
            if skip_special_tokens and token in self.all_special_tokens:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
if spaces_between_special_tokens:
text = " ".join(sub_texts)
else:
text = "".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
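    # Sketch: for filtered_tokens like ["▁Hello", "<new_tok>", "▁world"] where
    # "<new_tok>" is an added token, the surrounding pieces are decoded separately
    # and joined, yielding e.g. "Hello <new_tok> world" (before cleanup).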
| 16,212
| 42.700809
| 129
|
py
|
NMTGMinor
|
NMTGMinor-master/ae/VariationalLayer.py
|
import torch
import torch.nn as nn
class VariationalLayer(nn.Module):
def __init__(self, inputSize, outputSize):
super(VariationalLayer, self).__init__()
print("Variational layer")
self.inputSize = inputSize
self.outputSize = outputSize
        self.meanLL = nn.Linear(self.inputSize, self.outputSize)
        self.stdLL = nn.Linear(self.inputSize, self.outputSize)
self.meanAct = nn.Tanh()
self.stdAct = nn.Softplus()
def forward(self, input):
mean = self.meanLL(input)
mean = self.meanAct(mean)
self.mean = mean
if(self.training):
std = self.stdLL(input)
std = self.stdAct(std)
            # sample noise with the same shape, device and dtype as std
            random = torch.randn_like(std)
self.std = std
mean = mean + random * std
return mean
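# Minimal usage sketch (assumed sizes; not part of the original module):
#   layer = VariationalLayer(512, 256)
#   layer.train()
#   z = layer(x)        # tanh(mean(x)) + eps * softplus(std(x)), eps ~ N(0, I)
#   layer.eval()
#   mu = layer(x)       # evaluation mode returns the mean only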
| 901
| 23.378378
| 63
|
py
|
NMTGMinor
|
NMTGMinor-master/ae/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/ae/Trainer.py
|
from __future__ import division
import sys, tempfile
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
import random
import numpy as np
from onmt.multiprocessing.multiprocessing_wrapper import MultiprocessingRunner
from onmt.train_utils.trainer import BaseTrainer
class AETrainer(BaseTrainer):
def __init__(self, autoencoder, model, loss_function, trainData, validData, dicts, opt):
super().__init__(model, loss_function, trainData, validData, dicts, opt)
self.optim = onmt.Optim(opt)
self.autoencoder = autoencoder
if(opt.auto_encoder_type is None):
self.auto_encoder_type = "Baseline"
else:
self.auto_encoder_type = opt.auto_encoder_type
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
self.autoencoder = self.autoencoder.cuda()
self.optim.set_parameters(self.autoencoder.parameters())
def save(self, epoch, valid_ppl, batchOrder=None, iteration=-1):
opt = self.opt
autoencoder = self.autoencoder
autoencoder_state_dict = self.autoencoder.state_dict()
optim_state_dict = self.optim.state_dict()
# drop a checkpoint
checkpoint = {
'autoencoder': autoencoder_state_dict,
'opt': opt,
'epoch': epoch,
'iteration': iteration,
'batchOrder': batchOrder,
'optim': optim_state_dict
}
file_name = '%s_ppl_%.2f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
        # check the save directory here
def eval(self, data):
total_loss = 0
total_words = 0
batch_order = data.create_order(random=False)
self.model.eval()
self.autoencoder.eval()
""" New semantics of PyTorch: save space by not creating gradients """
with torch.no_grad():
for i in range(len(data)):
batch = data.next()[0]
if (self.cuda):
batch.cuda()
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
targets,outputs = self.autoencoder(batch)
loss_data = self.loss_function(outputs, targets)
# ~
total_loss += loss_data
total_words += outputs.size(0)
self.autoencoder.train()
return total_loss / total_words
def train_epoch(self, epoch, resume=False, batchOrder=None, iteration=0):
opt = self.opt
train_data = self.train_data
# Clear the gradients of the model
# self.runner.zero_grad()
self.autoencoder.zero_grad()
self.model.eval()
if opt.extra_shuffle and epoch > opt.curriculum:
train_data.shuffle()
# Shuffle mini batch order.
if resume:
train_data.batchOrder = batchOrder
train_data.set_index(iteration)
print("Resuming from iteration: %d" % iteration)
else:
batchOrder = train_data.create_order()
iteration = 0
total_loss, total_words = 0, 0
        report_loss, report_mu, report_sig, report_el, report_mse, report_kl, report_tgt_words = 0, 0, 0, 0, 0, 0, 0
report_src_words = 0
start = time.time()
nSamples = len(train_data)
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
for i in range(iteration, nSamples):
curriculum = (epoch < opt.curriculum)
batch = train_data.next(curriculum=curriculum)[0]
if (self.cuda):
batch.cuda()
oom = False
try:
batch_size = batch.size
# print("Input size:",batch[0].size())
targets,outputs = self.autoencoder(batch)
                loss_data = self.loss_function(outputs, targets.data)
if(self.auto_encoder_type == "Variational"):
m = self.autoencoder.variational_layer.mean
std = self.autoencoder.variational_layer.std
m = m.mul(m)
one = torch.ones(m.size())
if(m.is_cuda):
one = one.cuda()
                    # KL(N(mu, var) || N(0, 1)) summed over elements:
                    # 0.5 * (mu^2 + var - log(var) - 1), with `std` playing the role of the variance
                    var_loss = ((m + std - std.log() - one) * 0.5).sum()
report_mse += loss_data.item()
report_kl += var_loss.item()
report_mu += m.sum().item()
report_sig += std.sum().item()
report_el += m.numel()
loss_data = loss_data + var_loss
loss_data.backward()
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
else:
raise e
if not oom:
counter = counter + 1
num_accumulated_words += targets.size(0)
num_accumulated_sents += batch_size
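                # Gradient accumulation: parameters are updated only once roughly
                # opt.batch_size_update target words have been accumulated; the
                # 0.95 factor tolerates batches that slightly undershoot the target.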
if num_accumulated_words >= opt.batch_size_update * 0.95:
grad_denom = 1
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words
# Update the parameters.
self.optim.step(grad_denom=grad_denom)
self.autoencoder.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
                        print('Validation loss: %g' % valid_loss)
ep = float(epoch) - 1. + ((float(i) + 1.) / nSamples)
self.save(ep, valid_loss, batchOrder=batchOrder, iteration=i)
num_words = targets.size(0)
report_loss += loss_data
report_tgt_words += num_words
total_loss += loss_data
total_words += num_words
optim = self.optim
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
print(("Epoch %2d, %5d/%5d; ; loss: %6.2f (%6.2f, %6.2f) ; var: mu %6.2f sig: %6.2f; lr: %.7f ; num updates: %7d " +
"%5.0f src tok/s; %s elapsed") %
(epoch, i + 1, len(train_data),
report_loss / report_tgt_words,report_mse/report_tgt_words,report_kl/report_tgt_words,
report_mu / max(1,report_el), report_sig / max(1,report_el),
optim.getLearningRate(),
optim._step,
report_tgt_words / (time.time() - start),
str(datetime.timedelta(seconds=int(time.time() - self.start_time)))))
report_loss, report_tgt_words ,report_mse,report_kl= 0, 0, 0,0
report_mu,report_sig,report_el = 0,0,0
report_src_words = 0
start = time.time()
return total_loss / total_words
def run(self, save_file=None):
opt = self.opt
model = self.model
autoencoder = self.autoencoder
optim = self.optim
# Try to load the save_file
batchOrder = None
iteration = 0
print('Initializing model parameters')
autoencoder.init_model_parameters()
resume = False
valid_loss = self.eval(self.valid_data)
print('Validation loss: %g' % valid_loss)
self.start_time = time.time()
for epoch in range(opt.start_epoch, opt.start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume,
batchOrder=batchOrder,
iteration=iteration)
print('Train loss: %g' % train_loss)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
            print('Validation loss: %g' % valid_loss)
self.save(epoch, valid_loss)
batchOrder = None
iteration = None
resume = False
| 9,048
| 33.14717
| 136
|
py
|
NMTGMinor
|
NMTGMinor-master/ae/Evaluator.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from torch.autograd import Variable
from onmt.model_factory import build_model
import torch.nn.functional as F
from ae.Autoencoder import Autoencoder
import sys
model_list = ['transformer', 'stochastic_transformer']
class Evaluator(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.start_with_bos = opt.start_with_bos
self.fp16 = opt.fp16
self.models = list()
self.model_types = list()
        # opt.model is a "|"-delimited string of model paths
models = opt.model.split("|")
print(models)
self.n_models = len(models)
self._type = 'text'
        check_m = None
for i, model in enumerate(models):
if opt.verbose:
print('Loading model from %s' % model)
checkpoint = torch.load(model,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
if i == 0:
if ("src" in checkpoint['dicts']):
self.src_dict = checkpoint['dicts']['src']
else:
self._type = "audio"
self.tgt_dict = checkpoint['dicts']['tgt']
# Build model from the saved option
model = build_model(model_opt, checkpoint['dicts'])
model.load_state_dict(checkpoint['model'])
            check_m = checkpoint['model']
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
if opt.fp16:
model = model.half()
model.eval()
self.models.append(model)
self.model_types.append(model_opt.model)
self.cuda = opt.cuda
## Autoencoder
if opt.verbose:
print('Loading autoencoder from %s' % opt.autoencoder)
checkpoint = torch.load(opt.autoencoder,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
        posSize = checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
self.models[0].decoder.renew_buffer(posSize)
# Build model from the saved option
self.autoencoder = Autoencoder(self.models[0],model_opt)
self.autoencoder.load_state_dict(checkpoint['autoencoder'])
for k in checkpoint['autoencoder']:
if(k.startswith("nmt") and k[4:] in check_m):
n = checkpoint['autoencoder'][k]
o = check_m[k[4:]]
if(o.size() != n.size()):
print("Different size:",k[4:])
elif((n - o).sum() != 0):
print("Different weight:",k[4:])
if self.autoencoder.nmt.decoder.positional_encoder.len_max < self.opt.max_sent_length:
self.autoencoder.nmt.decoder.renew_buffer(self.opt.max_sent_length)
if opt.cuda:
self.autoencoder = self.autoencoder.cuda()
else:
self.autoencoder = self.autoencoder.cpu()
if opt.fp16:
self.autoencoder = self.autoencoder.half()
self.autoencoder.eval()
if opt.verbose:
print('Done')
# Combine distributions from different models
def _combineOutputs(self, outputs):
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i])
            output.div_(len(outputs))  # in-place; plain .div() would discard the result
# ~ output = torch.log(output)
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean":
output = torch.exp(outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i])
            output.div_(len(outputs))  # in-place; plain .div() would discard the result
# ~ output = torch.log(output)
output = torch.log(output)
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
            raise ValueError(
                'Ensemble operator needs to be "mean" or "logSum", the current value is %s' % self.ensemble_op)
return output
# Take the average of attention scores
def _combineAttention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
        attn.div_(len(attns))  # in-place; plain .div() would discard the result
return attn
def _getBatchSize(self, batch):
# if self._type == "text":
return batch.size(1)
# else:
# return batch.size(0)
def to_variable(self, data):
for i, t in enumerate(data):
if data[i] is not None:
if self.cuda:
if (data[i].type() == "torch.FloatTensor" and self.fp16):
data[i] = data[i].half()
data[i] = Variable(data[i].cuda())
else:
data[i] = Variable(data[i])
else:
data[i] = None
return data
def buildData(self, srcBatch, goldBatch):
# This needs to be the same as preprocess.py.
if self.start_with_bos:
srcData = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in srcBatch]
else:
srcData = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in srcBatch]
tgtData = None
if goldBatch:
tgtData = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in goldBatch]
return onmt.Dataset(srcData, tgtData, 9999,
data_type=self._type,
batch_size_sents=self.opt.batch_size)
def buildASRData(self, srcData, goldBatch):
# This needs to be the same as preprocess.py.
tgtData = None
if goldBatch:
tgtData = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in goldBatch]
return onmt.Dataset(srcData, tgtData, sys.maxsize,
[self.opt.gpu],
data_type=self._type, max_seq_num=self.opt.batch_size)
def buildTargetTokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
tokens = tokens[:-1] # EOS
return tokens
def evalBatch(self, batch):
torch.set_grad_enabled(False)
# Batch size is in different location depending on data.
if(self.autoencoder.representation == "EncoderDecoderHiddenState"):
state,prediction = self.autoencoder.calcAlignment(batch)
else:
state, prediction = self.autoencoder(batch)
return state,prediction
def eval(self, srcBatch, goldBatch):
# (1) convert words to indexes
dataset = self.buildData(srcBatch, goldBatch)
batch = dataset.next()[0]
if self.cuda:
batch.cuda()
# (2) eval
state,prediction = self.evalBatch(batch)
# (3) convert indexes to words
return self.calcDistance(state,prediction)
def evalASR(self, srcBatch, goldBatch):
# (1) convert words to indexes
dataset = self.buildASRData(srcBatch, goldBatch)
batch = self.to_variable(dataset.next()[0])
src, tgt = batch
batchSize = self._getBatchSize(src)
# (2) eval
        state, prediction = self.evalBatch(batch)  # evalBatch expects the full batch, not (src, tgt)
# (3) convert indexes to words
return self.calcDistance(state,prediction)
def calcDistance(self,state,prediction):
if(self.autoencoder.representation == "EncoderDecoderHiddenState"):
state = state.unsqueeze(0).expand(prediction.size(0),-1,-1,-1)
prediction = prediction.unsqueeze(1).expand(-1,state.size(1),-1,-1)
loss = state - prediction
loss = loss.mul(loss)
loss = loss.sum(-1)
else:
loss = state - prediction
loss = loss.mul(loss)
loss = loss.sum(1)
return loss
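    # Sketch of shapes: for "EncoderDecoderHiddenState" the result is a
    # (pred_len, state_len, batch) tensor of pairwise squared L2 distances;
    # otherwise it is a per-position squared L2 distance summed over the
    # hidden dimension.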
| 9,240
| 30.010067
| 111
|
py
|
NMTGMinor
|
NMTGMinor-master/ae/Autoencoder.py
|
import torch
import torch.nn as nn
import onmt
import torch.nn.functional as F
from ae.VariationalLayer import VariationalLayer
class Autoencoder(nn.Module):
def __init__(self, nmt_model,opt):
super(Autoencoder, self).__init__()
self.param_init = opt.param_init
self.nmt = nmt_model
self.representation = opt.representation
if(opt.auto_encoder_type is None):
self.model_type = "Baseline"
else:
self.model_type = opt.auto_encoder_type
if(opt.representation == "EncoderHiddenState"):
self.inputSize = nmt_model.encoder.model_size
elif (opt.representation == "DecoderHiddenState"):
self.inputSize = nmt_model.decoder.model_size
elif (opt.representation == "EncoderDecoderHiddenState"):
self.inputSize = nmt_model.encoder.model_size
elif (opt.representation == "Probabilities"):
if(type(nmt_model.generator) is nn.ModuleList):
self.inputSize = nmt_model.generator[0].output_size
else:
self.inputSize = nmt_model.generator.output_size
else:
raise NotImplementedError("Waring!"+opt.represenation+" not implemented for auto encoder")
self.outputSize = self.inputSize
if (opt.representation == "EncoderDecoderHiddenState"):
self.outputSize = self.inputSize = nmt_model.decoder.model_size
self.hiddenSize = opt.auto_encoder_hidden_size
layers = []
if(opt.auto_encoder_drop_out > 0):
layers.append(nn.Dropout(opt.auto_encoder_drop_out))
if(self.model_type == "Baseline"):
layers.append(nn.Linear(self.inputSize, self.hiddenSize))
layers.append(nn.Sigmoid())
elif(self.model_type == "Variational"):
self.variational_layer = VariationalLayer(self.inputSize,self.hiddenSize)
layers.append(self.variational_layer)
else:
raise NotImplementedError("Waring!" + self.model_type + " not implemented for auto encoder")
# if(opt.auto_encoder_drop_out > 0):
# layers.append(nn.Dropout(opt.auto_encoder_drop_out,inplace=True))
layers.append(nn.Linear(self.hiddenSize, self.outputSize))
self.model = nn.Sequential(*layers)
self.layers = layers
print("Autoencoder:",self.model)
def forward(self,batch):
src = batch.get('source')
tgt = batch.get('target_input')
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
if(self.representation == "EncoderHiddenState"):
with torch.no_grad():
encoder_output = self.nmt.encoder(src)
context = encoder_output['context']
src_mask = encoder_output['src_mask']
flattened_context = context.contiguous().view(-1, context.size(-1))
flattened_mask = src_mask.squeeze(1).transpose(0,1).contiguous().view(-1)
non_pad_indices = torch.nonzero(1-flattened_mask).squeeze(1)
clean_context = flattened_context.index_select(0, non_pad_indices)
clean_output = clean_context
elif(self.representation == "DecoderHiddenState"):
with torch.no_grad():
encoder_output = self.nmt.encoder(src)
context = encoder_output['context']
decoder_output = self.nmt.decoder(tgt, context, src)
output = decoder_output['hidden']
tgt_mask = tgt.data.eq(onmt.constants.PAD).unsqueeze(1)
tgt_mask2 = tgt.data.eq(onmt.constants.EOS).unsqueeze(1)
tgt_mask = tgt_mask + tgt_mask2
flattened_output = output.contiguous().view(-1, output.size(-1))
flattened_mask = tgt_mask.squeeze(1).transpose(0,1).contiguous().view(-1)
non_pad_indices = torch.nonzero(1-flattened_mask).squeeze(1)
clean_context = flattened_output.index_select(0, non_pad_indices)
clean_output = clean_context
elif (self.representation == "EncoderDecoderHiddenState"):
with torch.no_grad():
encoder_output = self.nmt.encoder(src)
context = encoder_output['context']
decoder_output = self.nmt.decoder(tgt, context, src)
output = decoder_output['hidden']
clean_output = output.clone()
# predict sum of target for all inputs
#tgt_mask = tgt.data.eq(onmt.Constants.PAD).unsqueeze(1)
#tgt_mask2 = tgt.data.eq(onmt.Constants.EOS).unsqueeze(1)
#tgt_mask = (tgt_mask + tgt_mask2).squeeze(1).transpose(0,1)
#output.masked_fill_(tgt_mask.unsqueeze(2).expand(-1,-1,output.size(2)),0)
#output = output.sum(0).unsqueeze(0).expand(context.size(0),-1,-1)
#select encoder states
src_mask = encoder_output['src_mask']
#flattened_context = context.contiguous().view(-1, context.size(-1))
#flattened_mask = src_mask.squeeze(1).transpose(0,1).contiguous().view(-1)
##non_pad_indices = torch.nonzero(1-flattened_mask).squeeze(1)
#clean_context = flattened_context.index_select(0, non_pad_indices)
#clean_output = output.contiguous().view(-1, output.size(-1)).index_select(0,non_pad_indices)
clean_context = context.contiguous().view(-1, context.size(-1)).clone()
elif(self.representation == "Probabilities"):
with torch.no_grad():
encoder_output = self.nmt.encoder(src)
context = encoder_output['context']
decoder_output = self.nmt.decoder(tgt, context, src)
output = decoder_output['hidden']
tgt_mask = tgt.data.eq(onmt.constants.PAD).unsqueeze(1)
tgt_mask2 = tgt.data.eq(onmt.constants.EOS).unsqueeze(1)
tgt_mask = tgt_mask + tgt_mask2
flattened_output = output.contiguous().view(-1, output.size(-1))
flattened_mask = tgt_mask.squeeze(1).transpose(0,1).contiguous().view(-1)
non_pad_indices = torch.nonzero(1-flattened_mask).squeeze(1)
clean_context = flattened_output.index_select(0, non_pad_indices)
if (type(self.nmt.generator) is nn.ModuleList):
clean_context = self.nmt.generator[0](clean_context)
else:
clean_context = self.nmt.generator(clean_context)
clean_output = clean_context
else:
raise NotImplementedError("Waring!"+opt.represenation+" not implemented for auto encoder")
# clean_context.require_grad=False
clean_context.detach_()
clean_output.detach_()
#result = self.model(clean_context)
result = clean_context
for i in range(len(self.layers)):
result = self.layers[i](result)
if (self.representation == "Probabilities"):
result = F.log_softmax(result, dim=-1)
if (self.representation == "EncoderDecoderHiddenState"):
result = result.view(src_mask.size(-1),src_mask.size(0),-1)
expand_result = result.unsqueeze(1).expand(-1,clean_output.size(0),-1,-1)
clean_output = clean_output.unsqueeze(0).expand(result.size(0),-1, -1, -1)
cos = nn.CosineSimilarity(dim=-1, eps=1e-6)
sim = cos(expand_result,clean_output)
tgt_mask = tgt.data.eq(onmt.constants.PAD).unsqueeze(1)
tgt_mask2 = tgt.data.eq(onmt.constants.EOS).unsqueeze(1)
tgt_mask = (tgt_mask + tgt_mask2).squeeze(1).transpose(0,1).unsqueeze(0).expand(result.size(0),-1,-1)
src_mask_align = src_mask.transpose(0,2).expand(-1,tgt_mask.size(1),-1)
mask = torch.max(src_mask_align,tgt_mask)
sim.masked_fill_(mask,-float('inf'))
alignment = F.softmax(sim,dim=0).masked_fill_(mask,0)
clean_output = (alignment.unsqueeze(-1).expand(-1,-1,-1,result.size(-1)) * clean_output).sum(1)
flattened_result = result.contiguous().view(-1, result.size(-1))
flattened_output = clean_output.contiguous().view(-1, clean_output.size(-1))
flattened_mask = src_mask.squeeze(1).transpose(0,1).contiguous().view(-1)
non_pad_indices = torch.nonzero(1-flattened_mask).squeeze(1)
result = flattened_result.index_select(0, non_pad_indices)
clean_output = flattened_output.index_select(0, non_pad_indices)
return clean_output,result
def calcAlignment(self, batch):
src = batch.get('source')
tgt = batch.get('target_input')
src = src.transpose(0, 1) # transpose to have batch first
tgt = tgt.transpose(0, 1)
if (self.representation == "EncoderDecoderHiddenState"):
with torch.no_grad():
encoder_output = self.nmt.encoder(src)
context = encoder_output['context']
decoder_output = self.nmt.decoder(tgt, context, src)
output = decoder_output['hidden']
flat_context = context.contiguous().view(-1, context.size(-1))
else:
raise NotImplementedError("Waring!" + opt.represenation + " not implemented for auto encoder")
# clean_context.require_grad=False
flat_context = flat_context.detach()
output = output.detach()
# result = self.model(clean_context)
result = flat_context
for i in range(len(self.layers)):
result = self.layers[i](result)
if (self.representation == "Probabilities"):
result = F.log_softmax(result, dim=-1)
return output, result.view(context.size(0),context.size(1),-1)
def autocode(self,input):
result = input.view(-1,input.size(2))
for i in range(len(self.layers)):
result = self.layers[i](result)
return result.view(input.size())
def init_model_parameters(self):
for p in self.parameters():
p.data.uniform_(-self.param_init, self.param_init)
def parameters(self):
param = []
for (n,p) in self.named_parameters():
if('nmt' not in n):
param.append(p)
return param
def load_state_dict(self, state_dict, strict=True):
def condition(param_name):
if 'positional_encoder' in param_name:
return False
if 'time_transformer' in param_name and self.nmt.encoder.time == 'positional_encoding':
return False
if param_name == 'nmt.decoder.mask':
return False
# if 'nmt' in param_name:
# return False
return True
if("nmt.generator.linear.weight" in state_dict and type(self.nmt.generator) is nn.ModuleList):
self.nmt.generator = self.nmt.generator[0]
filtered = {k: v for k, v in state_dict.items() if condition(k)}
model_dict = self.state_dict()
for k,v in model_dict.items():
if k not in filtered:
filtered[k] = v
super().load_state_dict(filtered)
if(type(self.nmt.generator) is not nn.ModuleList):
self.nmt.generator = nn.ModuleList([self.nmt.generator])
| 11,572
| 40.332143
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/Rescorer.py
|
import onmt
import onmt.modules
import torch.nn as nn
import torch
import math
from onmt.model_factory import build_model, build_language_model
from ae.Autoencoder import Autoencoder
import torch.nn.functional as F
import sys
model_list = ['transformer', 'stochastic_transformer', 'fusion_network']
class Rescorer(object):
def __init__(self, opt):
self.opt = opt
self.tt = torch.cuda if opt.cuda else torch
self.beam_accum = None
self.beta = opt.beta
self.alpha = opt.alpha
self.start_with_bos = opt.start_with_bos
self.fp16 = opt.fp16
self.attributes = opt.attributes # attributes split by |. for example: de|domain1
self.bos_token = opt.bos_token
self.sampling = opt.sampling
if self.attributes:
self.attributes = self.attributes.split("|")
self.models = list()
self.model_types = list()
        # opt.model is a "|"-delimited string of model paths
models = opt.model.split("|")
print(models)
self.n_models = len(models)
self._type = 'text'
for i, model in enumerate(models):
if opt.verbose:
print('Loading model from %s' % model)
checkpoint = torch.load(model,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
if i == 0:
if "src" in checkpoint['dicts']:
self.src_dict = checkpoint['dicts']['src']
else:
self._type = "audio"
self.tgt_dict = checkpoint['dicts']['tgt']
if "atb" in checkpoint["dicts"]:
self.atb_dict = checkpoint['dicts']['atb']
else:
self.atb_dict = None
self.bos_id = self.tgt_dict.labelToIdx[self.bos_token]
# Build model from the saved option
# if hasattr(model_opt, 'fusion') and model_opt.fusion == True:
# print("* Loading a FUSION model")
# model = build_fusion(model_opt, checkpoint['dicts'])
# else:
# model = build_model(model_opt, checkpoint['dicts'])
model = build_model(model_opt, checkpoint['dicts'])
model.load_state_dict(checkpoint['model'])
if model_opt.model in model_list:
# if model.decoder.positional_encoder.len_max < self.opt.max_sent_length:
# print("Not enough len to decode. Renewing .. ")
# model.decoder.renew_buffer(self.opt.max_sent_length)
model.renew_buffer(self.opt.max_sent_length)
if opt.fp16:
model = model.half()
if opt.cuda:
model = model.cuda()
else:
model = model.cpu()
model.eval()
self.models.append(model)
self.model_types.append(model_opt.model)
# language model
if opt.lm is not None:
if opt.verbose:
print('Loading language model from %s' % opt.lm)
lm_chkpoint = torch.load(opt.lm, map_location=lambda storage, loc: storage)
lm_opt = lm_chkpoint['opt']
lm_model = build_language_model(lm_opt, checkpoint['dicts'])
if opt.fp16:
lm_model = lm_model.half()
if opt.cuda:
lm_model = lm_model.cuda()
else:
lm_model = lm_model.cpu()
self.lm_model = lm_model
self.cuda = opt.cuda
self.ensemble_op = opt.ensemble_op
if opt.autoencoder is not None:
if opt.verbose:
print('Loading autoencoder from %s' % opt.autoencoder)
checkpoint = torch.load(opt.autoencoder,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# posSize= checkpoint['autoencoder']['nmt.decoder.positional_encoder.pos_emb'].size(0)
# self.models[0].decoder.renew_buffer(posSize)
# self.models[0].decoder.renew_buffer(posSize)
# Build model from the saved option
self.autoencoder = Autoencoder(self.models[0], model_opt)
self.autoencoder.load_state_dict(checkpoint['autoencoder'])
if opt.cuda:
self.autoencoder = self.autoencoder.cuda()
self.models[0] = self.models[0].cuda()
else:
self.autoencoder = self.autoencoder.cpu()
self.models[0] = self.models[0].cpu()
self.models[0].autoencoder = self.autoencoder
if opt.verbose:
print('Done')
def init_beam_accum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
# Combine distributions from different models
def _combine_outputs(self, outputs):
if len(outputs) == 1:
return outputs[0]
if self.ensemble_op == "logSum":
output = (outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += (outputs[i])
output.div_(len(outputs))
# output = torch.log(output)
output = F.log_softmax(output, dim=-1)
elif self.ensemble_op == "mean":
output = torch.exp(outputs[0])
# sum the log prob
for i in range(1, len(outputs)):
output += torch.exp(outputs[i])
output.div_(len(outputs))
# output = torch.log(output)
output = torch.log(output)
elif self.ensemble_op == "max":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.max(output,outputs[i])
elif self.ensemble_op == "min":
output = outputs[0]
for i in range(1, len(outputs)):
output = torch.min(output,outputs[i])
elif self.ensemble_op == 'gmean':
output = torch.exp(outputs[0])
# geometric mean of the probabilities
for i in range(1, len(outputs)):
output *= torch.exp(outputs[i])
# have to normalize
output.pow_(1.0 / float(len(outputs)))
norm_ = torch.norm(output, p=1, dim=-1)
output.div_(norm_.unsqueeze(-1))
output = torch.log(output)
else:
            raise ValueError(
                'Ensemble operator needs to be "mean", "logSum", "max", "min" or "gmean"; '
                'the current value is %s' % self.ensemble_op)
return output
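    # Illustrative sanity check (a standalone sketch, not used by the class):
    # the 'gmean' branch above is equivalent to renormalizing the average of
    # the log-probabilities. Relying on this module's torch/F imports:
    #
    #   a = F.log_softmax(torch.randn(1, 5), dim=-1)
    #   b = F.log_softmax(torch.randn(1, 5), dim=-1)
    #   g = (torch.exp(a) * torch.exp(b)).pow_(0.5)
    #   g = g / torch.norm(g, p=1, dim=-1).unsqueeze(-1)
    #   assert torch.allclose(torch.log(g), F.log_softmax((a + b) / 2, dim=-1), atol=1e-5)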
# Take the average of attention scores
def _combine_attention(self, attns):
attn = attns[0]
for i in range(1, len(attns)):
attn += attns[i]
        attn.div_(len(attns))  # in-place; the non-in-place div() discarded its result
return attn
def build_data(self, src_sents, tgt_sents):
# This needs to be the same as preprocess.py.
if self.start_with_bos:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD)
for b in src_sents]
else:
src_data = [self.src_dict.convertToIdx(b,
onmt.constants.UNK_WORD)
for b in src_sents]
tgt_bos_word = self.opt.bos_token
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
tgt_bos_word,
onmt.constants.EOS_WORD) for b in tgt_sents]
src_atbs = None
if self.attributes:
tgt_atbs = dict()
idx = 0
for i in self.atb_dict:
tgt_atbs[i] = [self.atb_dict[i].convertToIdx([self.attributes[idx]], onmt.constants.UNK_WORD)
for _ in src_sents]
idx = idx + 1
else:
tgt_atbs = None
return onmt.Dataset(src_data, tgt_data,
src_atbs=src_atbs, tgt_atbs=tgt_atbs,
batch_size_words=sys.maxsize,
data_type=self._type,
batch_size_sents=sys.maxsize)
def build_asr_data(self, src_data, tgt_sents):
# This needs to be the same as preprocess.py.
tgt_data = None
if tgt_sents:
tgt_data = [self.tgt_dict.convertToIdx(b,
onmt.constants.UNK_WORD,
onmt.constants.BOS_WORD,
onmt.constants.EOS_WORD) for b in tgt_sents]
return onmt.Dataset(src_data, tgt_data,
batch_size_words=sys.maxsize,
data_type=self._type, batch_size_sents=self.opt.batch_size)
def build_target_tokens(self, pred, src, attn):
tokens = self.tgt_dict.convertToLabels(pred, onmt.constants.EOS)
        tokens = tokens[:-1]  # drop the trailing EOS token
return tokens
def rescore_batch(self, batch):
torch.set_grad_enabled(False)
# Batch size is in different location depending on data.
beam_size = self.opt.beam_size
batch_size = batch.size
gold_scores = batch.get('source').data.new(batch_size).float().zero_()
gold_words = 0
allgold_scores = []
if batch.has_target:
# Use the first model to decode
model_ = self.models[0]
gold_words, gold_scores, allgold_scores = model_.decode(batch)
torch.set_grad_enabled(True)
return gold_scores, gold_words, allgold_scores
def rescore(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_data(src_data, tgt_data)
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
gold_score, gold_words, allgold_words = self.rescore_batch(batch)
return gold_score, gold_words, allgold_words
def rescore_asr(self, src_data, tgt_data):
# (1) convert words to indexes
dataset = self.build_asr_data(src_data, tgt_data)
# src, tgt = batch
batch = dataset.next()[0]
if self.cuda:
batch.cuda(fp16=self.fp16)
batch_size = batch.size
# (2) translate
gold_score, gold_words, allgold_words = self.rescore_batch(batch)
return gold_score, gold_words, allgold_words
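# Usage sketch (illustrative; the opt fields follow the constructor above):
#
#   rescorer = Rescorer(opt)  # opt.model may list several checkpoints: "m1.pt|m2.pt"
#   gold_score, gold_words, all_scores = rescorer.rescore(src_sents, tgt_sents)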
| 10,889
| 32.100304
| 111
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/constants.py
|
import torch
PAD = 0
UNK = 1
BOS = 2
EOS = 3
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
checkpointing = 0
static = False
residual_type = 'regular'
max_position_length = 8192
torch_version = float(torch.__version__[:3])
double_precision = False
recompute = False
neg_log_sigma1 = 0
neg_log_sigma2 = 4
prior_pi = 0.5
# global SRC_PAD
# global TGT_PAD
# global SRC_BOS
# global TGT_BOS
# global TGT_EOS
# global TGT_UNK
# global SRC_UNK
SRC_PAD_WORD = PAD_WORD
TGT_PAD_WORD = PAD_WORD
SRC_BOS_WORD = BOS_WORD
TGT_BOS_WORD = BOS_WORD
SRC_UNK_WORD = UNK_WORD
TGT_UNK_WORD = UNK_WORD
SRC_EOS_WORD = EOS_WORD
TGT_EOS_WORD = EOS_WORD
SRC_PAD = PAD
TGT_PAD = PAD
SRC_BOS = BOS
TGT_BOS = BOS
TGT_EOS = EOS
TGT_UNK = UNK
SRC_UNK = UNK
def add_tokenidx(opt, cons, dicts):
# the src_pad_word, tgt_pad_word etc are by default the same as before
# changed if we use roberta/bert
cons.SRC_PAD_WORD = opt.src_pad_word
cons.SRC_UNK_WORD = opt.src_unk_word
cons.SRC_BOS_WORD = opt.src_bos_word
cons.SRC_EOS_WORD = opt.src_eos_word
cons.TGT_PAD_WORD = opt.tgt_pad_word
cons.TGT_UNK_WORD = opt.tgt_unk_word
cons.TGT_BOS_WORD = opt.tgt_bos_word
cons.TGT_EOS_WORD = opt.tgt_eos_word
# In bilingual case there are two languages ("src" and "tgt")
# in the dictionary
if 'src' in dicts and 'tgt' in dicts:
src_dict = dicts['src']
cons.SRC_PAD = src_dict.labelToIdx[opt.src_pad_word]
cons.SRC_UNK = src_dict.labelToIdx[opt.src_unk_word]
cons.SRC_BOS = src_dict.labelToIdx[opt.src_bos_word]
cons.SRC_EOS = src_dict.labelToIdx[opt.src_eos_word]
tgt_dict = dicts['tgt']
cons.TGT_PAD = tgt_dict.labelToIdx[opt.tgt_pad_word]
cons.TGT_UNK = tgt_dict.labelToIdx[opt.tgt_unk_word]
cons.TGT_BOS = tgt_dict.labelToIdx[opt.tgt_bos_word]
cons.TGT_EOS = tgt_dict.labelToIdx[opt.tgt_eos_word]
# for speech recognition we don't have dicts['src']
elif 'tgt' in dicts:
src_dict = dicts['tgt']
cons.SRC_PAD = src_dict.labelToIdx[opt.src_pad_word] if opt.src_pad_word in src_dict.labelToIdx else 0
cons.SRC_UNK = src_dict.labelToIdx[opt.src_unk_word] if opt.src_unk_word in src_dict.labelToIdx else 1
cons.SRC_BOS = src_dict.labelToIdx[opt.src_bos_word] if opt.src_bos_word in src_dict.labelToIdx else 2
cons.SRC_EOS = src_dict.labelToIdx[opt.src_eos_word] if opt.src_eos_word in src_dict.labelToIdx else 3
tgt_dict = dicts['tgt']
cons.TGT_PAD = tgt_dict.labelToIdx[opt.tgt_pad_word] if opt.tgt_pad_word in tgt_dict.labelToIdx else 0
cons.TGT_UNK = tgt_dict.labelToIdx[opt.tgt_unk_word] if opt.tgt_unk_word in tgt_dict.labelToIdx else 1
cons.TGT_BOS = tgt_dict.labelToIdx[opt.tgt_bos_word] if opt.tgt_bos_word in tgt_dict.labelToIdx else 2
cons.TGT_EOS = tgt_dict.labelToIdx[opt.tgt_eos_word] if opt.tgt_eos_word in tgt_dict.labelToIdx else 3
else:
raise NotImplementedError
# print('[INFO] Target pad token is %s and pad id is %d' % (opt.tgt_pad_word, cons.TGT_PAD))
# print('[INFO] Target <s> token is %s and <s> id is %d' % (opt.tgt_bos_word, cons.TGT_BOS))
# print('[INFO] Target </s> token is %s and </s> id is %d' % (opt.tgt_eos_word, cons.TGT_EOS))
# print('[INFO] Target <unk> token is %s and <unk> id is %d' % (opt.tgt_unk_word, cons.TGT_UNK))
return cons
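# Usage sketch (mirrors the call commented out in onmt/model_factory.py):
#
#   onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
#   # afterwards, e.g., onmt.constants.TGT_BOS holds the index of
#   # opt.tgt_bos_word in dicts['tgt']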
# # for Bert, both en and zh; also for roberta zh
# BERT_PAD = 0
# BERT_UNK = 100
# BERT_BOS = 101
# BERT_EOS = 102
# BERT_MASK = 103
#
#
# # for Roberta_en
# EN_ROBERTA_PAD = 1
# EN_ROBERTA_UNK = 3
# EN_ROBERTA_BOS = 0
# EN_ROBERTA_EOS = 2
#
#
# MASK_WORD = '[MASK]'
# PAD_WORD = '<blank>'
# UNK_WORD = '<unk>'
# BOS_WORD = '<s>'
# EOS_WORD = '</s>'
| 3,775
| 28.968254
| 110
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/optim.py
|
import math
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
class AdamWrapper(optim.Adam):
def step(self, closure=None, fake=False):
if fake:
return
else:
super(AdamWrapper, self).step(closure=closure)
class AdamWWrapper(optim.AdamW):
def step(self, closure=None, fake=False):
if fake:
return
else:
super(AdamWWrapper, self).step(closure=closure)
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on:
`Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate
depending on the *scale_parameter*, *relative_step* and
*warmup_init* options. To use a manual (external) learning rate
schedule you should set `scale_parameter=False` and
`relative_step=False`.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): external learning rate (default: None)
        eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of
final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square
gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient
(default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of
parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(
self,
params,
lr=None,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
scale_parameter=True,
relative_step=True,
warmup_init=False,
):
if lr is not None and relative_step:
raise ValueError("Cannot combine manual lr and relative_step options")
if warmup_init and not relative_step:
raise ValueError("warmup_init requires relative_step=True")
defaults = dict(
lr=lr,
eps=eps,
clip_threshold=clip_threshold,
decay_rate=decay_rate,
beta1=beta1,
weight_decay=weight_decay,
scale_parameter=scale_parameter,
relative_step=relative_step,
warmup_init=warmup_init,
)
super(Adafactor, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return False
def _get_lr(self, param_group, param_state):
rel_step_sz = param_group["lr"]
# this should override the rel_step_sz
if param_group["relative_step"]:
min_step = (
1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
)
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
param_scale = 1.0
if param_group["scale_parameter"]:
param_scale = max(param_group["eps"][1], param_state["RMS"])
return param_scale * rel_step_sz
def _get_options(self, param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group["beta1"] is not None
return factored, use_first_moment
def _rms(self, tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
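        # note: ||t||_2 / sqrt(numel(t)) equals sqrt(mean(t ** 2)), i.e. the RMS of t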
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (
(exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))
.rsqrt_()
.unsqueeze(-1)
)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state["step"] = 0
if use_first_moment:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(
grad_shape[:-2] + grad_shape[-1:]
).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
state["RMS"] = 0
else:
if use_first_moment:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
state["RMS"] = self._rms(p_data_fp32)
group["lr"] = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
update = (grad ** 2) + group["eps"][0]
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(
update.mean(dim=-1), alpha=1.0 - beta2t
)
exp_avg_sq_col.mul_(beta2t).add_(
update.mean(dim=-2), alpha=1.0 - beta2t
)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_(
(self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)
)
update.mul_(group["lr"])
if use_first_moment:
exp_avg = state["exp_avg"]
exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
update = exp_avg
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
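# Minimal usage sketch for Adafactor (illustrative; the toy module and shapes
# are assumptions). With lr=None and relative_step=True, the time-dependent
# relative step size described in the docstring is used:
#
#   toy = torch.nn.Linear(16, 16)
#   optimizer = Adafactor(toy.parameters(), lr=None,
#                         scale_parameter=True, relative_step=True)
#   loss = toy(torch.randn(4, 16)).sum()
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()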
def normalize_gradients(parameters, denom):
    """Divide all gradients by `denom`; early return when denom == 1 (nothing to normalize)."""
    if denom == 1:
return
with torch.no_grad():
parameters = list(filter(lambda p: p.grad is not None, parameters))
denom = float(denom)
for p in parameters:
p.grad.data.div_(denom)
def detect_nan_inf(parameters):
parameters = list(filter(lambda p: p.grad is not None, parameters))
for p in parameters:
if torch.isinf(p.grad.data).any() or torch.isnan(p.grad.data).any():
return True
else:
continue
return False
def clip_grad_norm(parameters, max_norm, norm_type=2):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Variable]): an iterable of Variables that will have
gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
with torch.no_grad():
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
total_norm = total_norm ** (1. / norm_type)
if max_norm > 0:
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
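# Example (illustrative): clip a model's gradients to a total L2 norm of 5
# right before the optimizer step; with max_norm <= 0 the function only
# reports the norm without clipping.
#
#   total_norm = clip_grad_norm(model.parameters(), max_norm=5.0)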
class Optim(object):
def set_parameters(self, params):
        # params_ = filter(lambda p: p.requires_grad, params)
        params_ = params  # keep all params; frozen ones have no grad and are skipped by the optimizer
self.params = list(params_) # careful: params may be a generator
if self.method == 'sgd':
if not self.zeror:
self.optimizer = optim.SGD(self.params, lr=self.lr, weight_decay=self.weight_decay, momentum=0.0)
else:
from torch.distributed.optim import ZeroRedundancyOptimizer
optimizer = ZeroRedundancyOptimizer(
self.params,
optimizer_class=optim.SGD,
lr=self.lr, weight_decay=self.weight_decay, momentum=0.0
)
self.optimizer = optimizer
# elif self.method == 'multi_adam':
# from torch.optim._multi_tensor import Adam, AdamW
# if self.weight_decay > 0:
# self.optimizer = AdamW(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9,
# weight_decay=self.weight_decay, amsgrad=self.amsgrad)
# else:
# self.optimizer = Adam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9,
# weight_decay=0.0, amsgrad=self.amsgrad)
elif self.method == 'adam':
if not self.zeror:
if self.weight_decay > 0:
self.optimizer = AdamWWrapper(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9,
weight_decay=self.weight_decay, amsgrad=self.amsgrad)
else:
self.optimizer = AdamWrapper(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9,
weight_decay=0.0, amsgrad=self.amsgrad)
else:
from torch.distributed.optim import ZeroRedundancyOptimizer
optimizer = ZeroRedundancyOptimizer(
self.params,
optimizer_class=optim.AdamW if self.weight_decay > 0 else optim.Adam,
lr=1e-5,
betas=(self.beta1, self.beta2), eps=1e-9,
weight_decay=self.weight_decay
)
self.optimizer = optimizer
# elif self.method == 'adafactor':
# relative_step = False if self.lr > 0 else True
# self.optimizer = Adafactor(self.params, lr=self.lr if self.lr > 0 else None,
# eps=(1e-30, 1e-3), beta1=None,
# weight_decay=self.weight_decay,
# relative_step=relative_step,
# scale_parameter=False if self.lr > 0 else True,
# warmup_init=relative_step)
elif self.method in ['fused_adam']:
fast_adam = True
try:
import fused_optim
if self.amsgrad:
print("Note: AMSGRAD is not compatible with Fused Adam")
from onmt.modules.optimized.fused_adam import FusedAdam
self.optimizer = FusedAdam(self.params, lr=self.lr,
betas=(self.beta1, self.beta2), eps=1e-9,
weight_decay=self.weight_decay, amsgrad=False,
set_grad_none=False)
except (RuntimeError, ModuleNotFoundError):
fast_adam = False
if not fast_adam:
self.optimizer = optim.Adam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9,
weight_decay=self.weight_decay, amsgrad=self.amsgrad)
# elif self.method in ['fused_lamb', 'fused_nvlamb']:
#
# from onmt.modules.optimized.fused_lamb import FusedLAMB
# self.optimizer = FusedLAMB(self.params, lr=self.lr,
# betas=(self.beta1, self.beta2), eps=1e-6,
# weight_decay=self.weight_decay, amsgrad=self.amsgrad,
# set_grad_none=False, max_grad_norm=self.max_grad_norm,
# use_nvlamb=(self.method == 'fused_nvlamb'))
# elif self.method in ['novograd']:
# try:
# import apex
# if self.amsgrad:
# print("Note: AMSGRAD is not compatible with Fused Novograd")
# self.optimizer = apex.optimizers.FusedNovoGrad(self.params, lr=self.lr,
# betas=(self.beta1, self.beta2), eps=1e-9,
# weight_decay=self.weight_decay, amsgrad=False,
# set_grad_none=False)
# except RuntimeError as e:
# raise e
else:
raise RuntimeError("Invalid optim method: " + self.method)
if self.zeror:
self._optim = self.optimizer.optim
else:
self._optim = self.optimizer
def __init__(self, opt):
self.optimizer = None
self._optim = None
self.params = None
self.lr = opt.learning_rate
self.model_size = opt.model_size
self.max_grad_norm = opt.max_grad_norm
self.update_method = opt.update_method
self.method = opt.optim
self.zeror = opt.zeror_optim
if self.lr > 0:
if 'noam' in self.update_method:
self.init_lr = self.model_size ** (-0.5) * self.lr
elif 'cosine' in self.update_method:
print("* Using Cosine learning rate schedule")
self.scheduler = None
self.eta_min = 0.0
self.max_step = opt.max_step if hasattr(opt, 'max_step') else 33333
self.init_lr = self.lr
else:
self.init_lr = self.lr
self.lr = self.init_lr
self._step = 0
self._first_step = 0
if self.update_method == 'noam_nowarmup':
self._step = opt.warmup_steps
if self.update_method == 'cosine':
self.min_lr = 0.00
self.warmup_steps = opt.warmup_steps
self.beta1 = opt.beta1
self.beta2 = opt.beta2
self.weight_decay = opt.weight_decay
self.amsgrad = opt.amsgrad
self.max_steps = opt.max_steps
def step(self, scaler=None, grad_denom=None, warmup=False):
"Normalize gradients by batch size"
"Compute gradients norm."
# grad_norm = clip_grad_norm(self.params, self.max_grad_norm).item()
overflow = False
# if gradients have NaN/inf: return (which will be zeroed afterwards)
# only do that if the scaler is None, i.e no mechanism to detect inf/nan implicitly
# for apex amp, only skip if overflow is not detected
        # if detect_nan_inf(self.params):
# if scaler is None and not overflow:
# return
"Automatically scale learning rate over learning period if not overflow"
if not overflow:
self._step += 1
if 'noam' in self.update_method or 'cosine' in self.update_method:
self.update_learning_rate()
if scaler is not None:
result = scaler.step(self.optimizer)
else:
self.optimizer.step()
# return grad_norm
"""Reset the denom for normalization"""
def normalize_grad(self, denom=None):
if denom is None:
denom = 1
normalize_gradients(self.params, denom)
def update_learning_rate(self):
"""
Decay learning rate if val perf does not improve
or we hit the start_decay_at limit.
"""
if self.lr < 0:
return
if self.update_method in ['noam', 'noam_nowarmup', 'noam_half']:
if self._step <= self.warmup_steps:
self.lr = self.init_lr * self._step * self.warmup_steps ** (-1.5)
else:
self.lr = self.init_lr * self._step ** (-0.5)
if self.update_method == 'noam_half':
self.lr = self.lr / 2
self.optimizer.param_groups[0]['lr'] = self.lr
elif self.update_method in ['cosine']:
# if self.scheduler is None:
# self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.max_step,
# eta_min=self.eta_min)
#
# self.scheduler.step(self._step)
self.lr = self.min_lr + 0.5 * (self.init_lr - self.min_lr) * \
(1 + math.cos((self._step / self.max_step) * math.pi))
self._optim.param_groups[0]['lr'] = self.lr
        elif self.update_method in ['regular', 'basic', 'none']:
            # constant learning rate: nothing to update
            pass
# self.lr = self.optimizer.param_groups[0]['lr']
# self.optimizer.param_groups[0]['lr'] = self.lr
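        # Worked example (illustrative numbers): with init_lr = 1.0 and
        # warmup_steps = 4000, the 'noam' schedule above gives
        #   step  1000: lr = 1000 * 4000 ** -1.5  ~ 3.95e-3  (linear warm-up)
        #   step  4000: lr = 4000 ** -0.5         ~ 1.58e-2  (peak)
        #   step 16000: lr = 16000 ** -0.5        ~ 7.91e-3  (inverse-sqrt decay)
        # The 'cosine' branch instead anneals from init_lr to min_lr over
        # max_step steps via 0.5 * (1 + cos(pi * step / max_step)).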
def set_learning_rate(self, lr):
self._optim.param_groups[0]['lr'] = lr
self.lr = lr
def get_learning_rate(self):
if self._optim.param_groups[0]['lr'] is None:
return self.lr
else:
return self._optim.param_groups[0]['lr']
def reset(self):
self._step = self._first_step
for group in self._optim.param_groups:
if 'step' in group:
group['step'] = self._first_step
def state_dict(self):
state_dict = self.optimizer.state_dict()
state_dict['_step'] = self._step
return state_dict
def load_state_dict(self, state_dict):
self._step = state_dict['_step']
state_dict['step'] = self._step
self._first_step = self._step
print("* Loading from step %d " % self._step)
state_dict.pop('_step', None)
self.optimizer.load_state_dict(state_dict)
def zero_grad(self, set_to_none=False):
self.optimizer.zero_grad(set_to_none=set_to_none)
def set_starting_step(self, step):
self._step = step
self._first_step = step
| 21,058
| 37.854244
| 116
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/model_factory.py
|
import torch
import torch.nn as nn
import onmt
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, MixedEncoder
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.relative_transformer import RelativeTransformer
from onmt.modules.copy_generator import CopyGenerator
from options import backward_compatible
import math
import json
from types import SimpleNamespace
init = torch.nn.init
MAX_LEN = onmt.constants.max_position_length # This should be the longest sentence from the dataset
def json_to_namespace(json_file):
with open(json_file) as f:
x = json.load(f, object_hook=lambda d: SimpleNamespace(**d))
for name in x.__dict__:
if x.__dict__[name] in ['False', 'True']:
x.__dict__[name] = (x.__dict__[name] == 'True')
return x
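# Illustrative sketch: a config file containing
#   {"model_size": 512, "rezero": "False"}
# loads as a namespace with x.model_size == 512 and x.rezero is False
# (the string booleans are coerced by the loop above).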
def remove_pretrain_weights(opt):
opt.dec_state_dict = ""
opt.enc_state_dict = ""
return opt
def build_model(opt, dicts, remove_pretrain=False, constants=None):
# adding missing options if the opt was built before. (for loading old models)
opt = backward_compatible(opt)
if remove_pretrain:
print("[INFO] Removing pretrained weights from opt")
opt = remove_pretrain_weights(opt)
onmt.constants.layer_norm = opt.layer_norm
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.activation_layer = opt.activation_layer
onmt.constants.version = 1.0
onmt.constants.attention_out = opt.attention_out
onmt.constants.residual_type = opt.residual_type
onmt.constants.fused_ffn = opt.fused_ffn
opt.nce = opt.nce_noise > 0
if 'langs' not in dicts:
dicts['langs'] = {'src': 0, 'tgt': 1}
opt.n_languages = len(dicts['langs'])
opt.n_attributes = len(dicts['atbs']) if 'atbs' in dicts else 0
if 'atbs' in dicts and 'nothingness' in dicts['atbs'] and len(dicts['atbs']) == 1:
opt.n_attributes = 0
if opt.bayes_by_backprop:
from onmt.bayesian_factory import build_model as build_bayesian_model
model = build_bayesian_model(opt, dicts)
return model
if not opt.fusion:
model = build_tm_model(opt, dicts, constants=constants)
else:
        # fusion models are currently not supported from here
        raise NotImplementedError
        # model = build_fusion(opt, dicts)  # unreachable, kept for reference
return model
def build_classifier(opt, dicts):
opt = backward_compatible(opt)
if 'langs' not in dicts:
dicts['langs'] = {'src': 0, 'tgt': 1}
opt.n_languages = len(dicts['langs'])
generators = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['tgt'].size(),
fix_norm=opt.fix_norm_output_embedding)]
onmt.constants.init_value = opt.param_init
from onmt.models.speech_recognizer.relative_transformer import \
SpeechTransformerEncoder
from onmt.models.speech_recognizer.classifier import TransformerClassifier
if opt.model in ["wav2vec2", "wav2vec"]:
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2Vec, Wav2vecBERT
encoder = FairseqWav2Vec(opt, model_path=opt.wav2vec2_pretrained_model)
elif opt.model in ["LSTM", 'lstm']:
# print("LSTM")
onmt.constants.init_value = opt.param_init
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder, SpeechLSTMEncoder, SpeechLSTMSeq2Seq
encoder = SpeechLSTMEncoder(opt, None, opt.encoder_type)
else:
encoder = SpeechTransformerEncoder(opt, None, None, opt.encoder_type)
model = TransformerClassifier(encoder, nn.ModuleList(generators), mpc=opt.mpc)
return model
def build_tm_model(opt, dicts, constants=None):
# onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
if constants is None:
constants = onmt.constants
# BUILD POSITIONAL ENCODING
if opt.time == 'positional_encoding':
positional_encoder = PositionalEncoding(opt.model_size, len_max=MAX_LEN)
else:
raise NotImplementedError
if opt.reconstruct:
# reconstruction is only compatible
assert opt.model == 'relative_transformer'
assert opt.encoder_type == 'text'
# BUILD GENERATOR
if opt.copy_generator:
if opt.nce_noise > 0:
print("[INFO] Copy generator overrides NCE.")
opt.nce = False
opt.nce_noise = 0
generators = [CopyGenerator(opt.model_size, dicts['tgt'].size(),
fix_norm=opt.fix_norm_output_embedding)]
elif opt.nce_noise > 0:
from onmt.modules.nce.nce_linear import NCELinear
from onmt.modules.nce.nce_utils import build_unigram_noise
noise_distribution = build_unigram_noise(torch.FloatTensor(list(dicts['tgt'].frequencies.values())))
generator = NCELinear(opt.model_size, dicts['tgt'].size(), fix_norm=opt.fix_norm_output_embedding,
noise_distribution=noise_distribution, noise_ratio=opt.nce_noise)
generators = [generator]
else:
generators = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['tgt'].size(),
fix_norm=opt.fix_norm_output_embedding)]
# BUILD EMBEDDINGS
if 'src' in dicts:
if (not hasattr(opt, "enc_pretrained_model")) or (not opt.enc_pretrained_model):
embedding_src = nn.Embedding(dicts['src'].size(),
opt.model_size,
padding_idx=constants.SRC_PAD)
else:
embedding_src = None
if opt.join_embedding and embedding_src is not None:
embedding_tgt = embedding_src
print("* Joining the weights of encoder and decoder word embeddings")
elif not opt.dec_pretrained_model:
embedding_tgt = nn.Embedding(dicts['tgt'].size(),
opt.model_size,
padding_idx=constants.TGT_PAD)
else:
assert opt.model in ["pretrain_transformer", "wav2vec2_bert",
"wav2vec_mbart50", "quantize_wav2vec2_bert", "quantize_wav2vec2_mbart50"], \
"Expecting a pretrained model that has a " \
"separate Embedding initialization"
embedding_tgt = None
if opt.use_language_embedding:
print("* Create language embeddings with %d languages" % len(dicts['langs']))
language_embeddings = nn.Embedding(len(dicts['langs']), opt.model_size)
else:
language_embeddings = None
if opt.model in ['wav2vec2_bert', 'quantize_wav2vec2_bert', 'quantize_wav2vec2_mbart50']:
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2Vec, Wav2vecBERT
# if opt.model.startswith("quantize"):
# from pretrain_module.modeling_mbart import MBartDecoder, MBartEncoder
# from pretrain_module.configuration_mbart import MBartConfig
# enc_mbart_config = MBartConfig.from_json_file(opt.enc_config_file)
# discrete_encoder = MBartEncoder(enc_mbart_config, opt)
# # print("[INFO] Loading weights for mBART encoder from: %s ..." % opt.enc_state_dict)
# # enc_model_state_dict = torch.load(opt.enc_state_dict, map_location="cpu")
# # discrete_encoder.load_state_dict(enc_model_state_dict)
# else:
# discrete_encoder = None
# TODO: create a stacked encoder here
# if len(opt.dec_pretrained_model)
stacked_encoder = None
if len(opt.enc_stacked_pretrained_model) > 0:
if "mbart" in opt.enc_stacked_pretrained_model:
print("[INFO] Created a stacked encoder MBART-50")
from pretrain_module.modeling_mbart import MBartEncoder
from pretrain_module.configuration_mbart import MBartConfig
enc_mbart_config = MBartConfig.from_json_file(opt.enc_config_file)
stacked_encoder = MBartEncoder(enc_mbart_config, opt)
else:
raise NotImplementedError
if opt.enc_state_dict is not None and len(opt.enc_state_dict) > 1:
print("[INFO] Loading weights for stacked encoder from: %s ..." % opt.enc_state_dict)
enc_model_state_dict = torch.load(opt.enc_state_dict, map_location="cpu")
# load parameters from state dict to model (using huggingface's approach)
# decoder.from_pretrained(state_dict=dec_model_state_dict,
# model=decoder,
# output_loading_info=opt.verbose,
# model_prefix=opt.dec_pretrained_model
# )
# current_dict = decoder.state_dict()
#
# for key in current_dict:
# if key not in dec_model_state_dict:
# dec_model_state_dict[key] = current_dict[key]
stacked_encoder.load_state_dict(enc_model_state_dict)
print("[INFO] ... Done")
discrete_encoder = None
if "wavlm" in opt.enc_pretrained_model:
from onmt.models.speech_recognizer.wavlm import WavLMEncoder
encoder = WavLMEncoder(opt, opt.wav2vec2_pretrained_model)
else:
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2Vec
encoder = FairseqWav2Vec(opt, model_path=opt.wav2vec2_pretrained_model,
discrete_encoder=discrete_encoder, stacked_encoder=stacked_encoder)
if "s4" in opt.enc_pretrained_model: # wav2vec_s4
s4_config = json_to_namespace(opt.s4_config_file)
print("[INFO] Replacing self attn in encoder with s4")
encoder.wav2vec_encoder.replace_attn_with_s4(s4_config)
sub_encoder = None
if "mbart" in opt.dec_pretrained_model:
from pretrain_module.configuration_mbart import MBartConfig
from pretrain_module.modeling_mbart import MBartDecoder, MBartEncoder
print("[INFO] Created MBART decoder from: %s ..." % opt.dec_config_file)
dec_mbart_config = MBartConfig.from_json_file(opt.dec_config_file)
decoder = MBartDecoder(dec_mbart_config, opt)
if opt.freeze_embedding:
decoder.embed_tokens.weight.requires_grad = False
# if opt.enc_config_file:
# enc_mbart_config = MBartConfig.from_json_file(opt.enc_config_file)
# sub_encoder = MBartEncoder(enc_mbart_config, opt)
elif opt.dec_pretrained_model in ['deltalm']:
print("[INFO] Created DeltaLM decoder from: %s ..." % opt.dec_config_file)
from onmt.models.deltalm.deltalm import DeltaLMDecoder
deltalm_config = json_to_namespace(opt.dec_config_file)
embedding_tgt = nn.Embedding(dicts['tgt'].size(),
deltalm_config.decoder_embed_dim,
padding_idx=constants.TGT_PAD)
decoder = DeltaLMDecoder(deltalm_config, embedding_tgt, opt=opt)
# share all embeddings
generators[0].linear.weight = decoder.embed_tokens.weight
if opt.freeze_embedding:
decoder.embed_tokens.weight.requires_grad = False
elif opt.dec_pretrained_model == "bart":
from pretrain_module.configuration_bart import BartConfig
from pretrain_module.modeling_bart import BartDecoder
dec_bart_config = BartConfig.from_json_file(opt.dec_config_file)
decoder = BartDecoder(dec_bart_config, opt)
if opt.dec_state_dict is not None and len(opt.dec_state_dict) > 1:
print("[INFO] Loading weights for decoder from: %s ..." % opt.dec_state_dict)
dec_model_state_dict = torch.load(opt.dec_state_dict, map_location="cpu")
# load parameters from state dict to model (using huggingface's approach)
# decoder.from_pretrained(state_dict=dec_model_state_dict,
# model=decoder,
# output_loading_info=opt.verbose,
# model_prefix=opt.dec_pretrained_model
# )
current_dict = decoder.state_dict()
for key in current_dict:
if key not in dec_model_state_dict:
dec_model_state_dict[key] = current_dict[key]
decoder.load_state_dict(dec_model_state_dict)
print("[INFO] ... Done")
# if len(opt.enc_state_dict) > 1:
# print("[INFO] Loading weights for mBART encoder from: %s ..." % opt.enc_state_dict)
# enc_model_state_dict = torch.load(opt.enc_state_dict, map_location="cpu")
# sub_encoder.load_state_dict(enc_model_state_dict)
# for parameter in sub_encoder.parameters():
# parameter.requires_grad = False # don't update these guys
# sub_encoder.embed_tokens = decoder.embed_tokens # and reduce memory usage
decoder.dec_pretrained_model = opt.dec_pretrained_model
if opt.freeze_embedding:
generators[0].linear.bias.requires_grad = False
model = Wav2vecBERT(encoder, decoder, nn.ModuleList(generators), mirror=opt.mirror_loss, ctc=opt.ctc_loss > 0.0,
sub_encoder=sub_encoder)
# TODO: share the ctc_loss weight with the decoder weights
elif opt.model in ['wav2vec2_transformer']:
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2Vec, Wav2vecTransformer
from onmt.models.speech_recognizer.relative_transformer import SpeechTransformerDecoder
encoder = FairseqWav2Vec(opt, model_path=opt.wav2vec2_pretrained_model)
decoder = SpeechTransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
model = Wav2vecTransformer(encoder, decoder, nn.ModuleList(generators),
mirror=opt.mirror_loss, ctc=opt.ctc_loss > 0.0)
elif opt.model in ['discourse_speech_transformer']:
from onmt.models.discourse.discourse_transformer import DiscourseTransformerEncoder, DiscourseTransformer
from onmt.models.speech_recognizer.relative_transformer import \
SpeechTransformerEncoder, SpeechTransformerDecoder
encoder = SpeechTransformerEncoder(opt, None, positional_encoder, opt.encoder_type)
decoder = SpeechTransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
encoder = DiscourseTransformerEncoder(opt, encoder=encoder)
model = DiscourseTransformer(encoder, decoder, nn.ModuleList(generators),
None, None, mirror=opt.mirror_loss, ctc=opt.ctc_loss > 0.0)
elif opt.model in ['discourse_translator']:
from onmt.models.discourse.discourse_transformer import DiscourseTransformerEncoder, DiscourseTransformer
onmt.constants.init_value = opt.param_init
from onmt.models.multilingual_translator.relative_transformer import \
RelativeTransformerEncoder, RelativeTransformerDecoder
encoder = RelativeTransformerEncoder(opt, embedding_src, None,
opt.encoder_type, language_embeddings=language_embeddings)
decoder = RelativeTransformerDecoder(opt, embedding_tgt, None, language_embeddings=language_embeddings)
encoder = DiscourseTransformerEncoder(opt, encoder=encoder)
model = DiscourseTransformer(encoder, decoder, nn.ModuleList(generators),
None, None, mirror=opt.mirror_loss)
elif opt.model in ['conformer', 'speech_transformer', 'hybrid_transformer']:
onmt.constants.init_value = opt.param_init
from onmt.models.speech_recognizer.relative_transformer import \
SpeechTransformerEncoder, SpeechTransformerDecoder
if opt.model == 'conformer':
from onmt.models.speech_recognizer.conformer import ConformerEncoder
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder
            opt.cnn_downsampling = True  # force this flag so that the decoder-side masking is computed correctly
encoder = ConformerEncoder(opt, None, None, 'audio')
# decoder = SpeechLSTMDecoder(opt, embedding_tgt, language_embeddings=language_embeddings)
decoder = SpeechTransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
# model = Conformer(encoder, decoder, nn.ModuleList(generators), ctc=opt.ctc_loss > 0.0)
model = RelativeTransformer(encoder, decoder, nn.ModuleList(generators),
None, None, mirror=opt.mirror_loss, ctc=opt.ctc_loss > 0.0)
elif opt.model == 'hybrid_transformer':
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder, SpeechLSTMEncoder, SpeechLSTMSeq2Seq
encoder = SpeechTransformerEncoder(opt, None, positional_encoder, opt.encoder_type)
decoder = SpeechLSTMDecoder(opt, embedding_tgt, language_embeddings=language_embeddings)
model = SpeechLSTMSeq2Seq(encoder, decoder, nn.ModuleList(generators), ctc=opt.ctc_loss > 0.0)
else:
encoder = SpeechTransformerEncoder(opt, None, positional_encoder, opt.encoder_type)
decoder = SpeechTransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
model = RelativeTransformer(encoder, decoder, nn.ModuleList(generators),
None, None, mirror=opt.mirror_loss, ctc=opt.ctc_loss > 0.0)
# If we use the multilingual model and weights are partitioned:
if opt.multilingual_partitioned_weights:
# this is basically the language embeddings
factor_embeddings = nn.Embedding(len(dicts['langs']), opt.mpw_factor_size)
encoder.factor_embeddings = factor_embeddings
decoder.factor_embeddings = factor_embeddings
elif opt.model in ["LSTM", 'lstm']:
# print("LSTM")
onmt.constants.init_value = opt.param_init
from onmt.models.speech_recognizer.lstm import SpeechLSTMDecoder, SpeechLSTMEncoder, SpeechLSTMSeq2Seq
encoder = SpeechLSTMEncoder(opt, None, opt.encoder_type)
decoder = SpeechLSTMDecoder(opt, embedding_tgt, language_embeddings=language_embeddings)
model = SpeechLSTMSeq2Seq(encoder, decoder, nn.ModuleList(generators), ctc=opt.ctc_loss > 0.0)
elif opt.model in ['multilingual_translator', 'translator']:
onmt.constants.init_value = opt.param_init
from onmt.models.multilingual_translator.relative_transformer import \
RelativeTransformerEncoder, RelativeTransformerDecoder
encoder = RelativeTransformerEncoder(opt, embedding_src, None,
opt.encoder_type, language_embeddings=language_embeddings)
decoder = RelativeTransformerDecoder(opt, embedding_tgt, None, language_embeddings=language_embeddings)
model = RelativeTransformer(encoder, decoder, nn.ModuleList(generators),
None, None, mirror=opt.mirror_loss)
elif opt.model in ['transformer', 'stochastic_transformer']:
onmt.constants.init_value = opt.param_init
if opt.encoder_type == "text":
encoder = TransformerEncoder(opt, embedding_src, positional_encoder,
opt.encoder_type, language_embeddings=language_embeddings)
elif opt.encoder_type == "audio":
encoder = TransformerEncoder(opt, None, positional_encoder, opt.encoder_type)
elif opt.encoder_type == "mix":
text_encoder = TransformerEncoder(opt, embedding_src, positional_encoder,
"text", language_embeddings=language_embeddings)
audio_encoder = TransformerEncoder(opt, None, positional_encoder, "audio")
encoder = MixedEncoder(text_encoder, audio_encoder)
else:
print("Unknown encoder type:", opt.encoder_type)
exit(-1)
decoder = TransformerDecoder(opt, embedding_tgt, positional_encoder, language_embeddings=language_embeddings)
model = Transformer(encoder, decoder, nn.ModuleList(generators), mirror=opt.mirror_loss)
elif opt.model == 'relative_transformer':
from onmt.models.relative_transformer import \
RelativeTransformerEncoder, RelativeTransformerDecoder
if opt.encoder_type == "text":
encoder = RelativeTransformerEncoder(opt, embedding_src, None,
opt.encoder_type, language_embeddings=language_embeddings)
if opt.encoder_type == "audio":
# raise NotImplementedError
encoder = RelativeTransformerEncoder(opt, None, None, encoder_type=opt.encoder_type,
language_embeddings=language_embeddings)
generator = nn.ModuleList(generators)
decoder = RelativeTransformerDecoder(opt, embedding_tgt, None, language_embeddings=language_embeddings)
if opt.reconstruct:
rev_decoder = RelativeTransformerDecoder(opt, embedding_src, None, language_embeddings=language_embeddings)
rev_generator = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['src'].size(),
fix_norm=opt.fix_norm_output_embedding)]
rev_generator = nn.ModuleList(rev_generator)
else:
rev_decoder = None
rev_generator = None
model = RelativeTransformer(encoder, decoder, generator, rev_decoder, rev_generator, mirror=opt.mirror_loss)
elif opt.model == 'universal_transformer':
from onmt.legacy.old_models.universal_transformer import UniversalTransformerDecoder, \
UniversalTransformerEncoder
generator = nn.ModuleList(generators)
if opt.encoder_type == "text":
encoder = UniversalTransformerEncoder(opt, embedding_src, positional_encoder,
opt.encoder_type, language_embeddings=language_embeddings)
elif opt.encoder_type == "audio":
encoder = UniversalTransformerEncoder(opt, None, positional_encoder, opt.encoder_type)
decoder = UniversalTransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
model = Transformer(encoder, decoder, generator, mirror=opt.mirror_loss)
elif opt.model == 'pretrain_transformer':
assert (opt.enc_pretrained_model or opt.dec_pretrained_model)
from onmt.models.pretrain_transformer import PretrainTransformer
if opt.enc_pretrained_model in ["mbart", "mbart50"]:
from pretrain_module.configuration_mbart import MBartConfig
from pretrain_module.modeling_mbart import MBartEncoder
enc_mbart_config = MBartConfig.from_json_file(opt.enc_config_file)
encoder = MBartEncoder(enc_mbart_config, opt)
elif opt.enc_pretrained_model in ["m2m", "m2m100"]:
from pretrain_module.configuration_m2m100 import M2M100Config
from pretrain_module.modeling_m2m100 import M2M100Encoder
enc_mbart_config = M2M100Config.from_json_file(opt.enc_config_file)
encoder = M2M100Encoder(enc_mbart_config, opt)
elif opt.enc_pretrained_model in ["deltalm"]:
from onmt.models.deltalm.deltalm import DeltaLMEncoder
deltalm_config = json_to_namespace(opt.dec_config_file)
embedding_src = nn.Embedding(dicts['src'].size(),
deltalm_config.encoder_embed_dim,
padding_idx=constants.SRC_PAD)
encoder = DeltaLMEncoder(deltalm_config, embedding_src)
elif not opt.enc_pretrained_model:
print(" Encoder is not from pretrained model")
encoder = TransformerEncoder(opt, embedding_src, positional_encoder,
opt.encoder_type, language_embeddings=language_embeddings)
else:
print("Pretrained Encoder type not supported")
exit(-1)
if opt.load_from or not opt.enc_state_dict:
if opt.verbose:
print(" No weights loading from {} for encoder".format(opt.enc_pretrained_model))
elif opt.enc_pretrained_model:
print("[INFO] Loading weights for encoder from: \n", opt.enc_state_dict)
enc_model_state_dict = torch.load(opt.enc_state_dict, map_location="cpu")
if opt.enc_pretrained_model in ["m2m"]:
enc_model_state_dict["embed_positions.weights"] = encoder.embed_positions.weights
encoder.load_state_dict(enc_model_state_dict)
print("Done")
elif opt.enc_pretrained_model not in ["deltalm"]:
encoder.from_pretrained(state_dict=enc_model_state_dict,
model=encoder,
output_loading_info=opt.verbose,
model_prefix=opt.enc_pretrained_model
)
if opt.dec_pretrained_model:
print("* Build decoder with dec_pretrained_model: {}".format(opt.dec_pretrained_model))
if opt.dec_pretrained_model == "bert":
if opt.enc_pretrained_model != "bert":
from pretrain_module.configuration_bert import BertConfig
from pretrain_module.modeling_bert import BertModel
dec_bert_config = BertConfig.from_json_file(opt.dec_config_file)
decoder = BertModel(dec_bert_config,
bert_word_dropout=opt.dec_pretrain_word_dropout,
bert_emb_dropout=opt.dec_pretrain_emb_dropout,
bert_atten_dropout=opt.dec_pretrain_attn_dropout,
bert_hidden_dropout=opt.dec_pretrain_hidden_dropout,
bert_hidden_size=opt.dec_pretrain_hidden_size,
is_decoder=True,
max_pos_len=opt.max_pos_length,
pos_emb_type=opt.pos_emb_type,
)
elif opt.dec_pretrained_model in ["mbart", "mbart50"]:
if opt.enc_pretrained_model not in ["mbart", "mbart50"]:
from pretrain_module.configuration_mbart import MBartConfig
from pretrain_module.modeling_mbart import MBartDecoder
dec_config = MBartConfig.from_json_file(opt.dec_config_file)
decoder = MBartDecoder(dec_config, opt)
decoder.embed_tokens.weight = encoder.embed_tokens.weight
generators[0].linear.weight = encoder.embed_tokens.weight
encoder.embed_tokens.weight.requires_grad = False
decoder.embed_tokens.weight.requires_grad = False
generators[0].linear.bias.requires_grad = False
elif opt.dec_pretrained_model in ["m2m", "m2m100"]:
if opt.enc_pretrained_model not in ["m2m", "m2m100"]:
from pretrain_module.configuration_m2m100 import M2M100Config
from pretrain_module.modeling_m2m100 import M2M100Decoder
dec_config = M2M100Config.from_json_file(opt.dec_config_file)
decoder = M2M100Decoder(dec_config, opt)
decoder.embed_tokens.weight = encoder.embed_tokens.weight
generators[0].linear.weight = encoder.embed_tokens.weight
# encoder.embed_tokens.weight.requires_grad = False
# decoder.embed_tokens.weight.requires_grad = False
# generators[0].linear.bias.requires_grad = False
elif opt.dec_pretrained_model in ["deltalm"]:
from onmt.models.deltalm.deltalm import DeltaLMDecoder
deltalm_config = json_to_namespace(opt.dec_config_file)
embedding_tgt = nn.Embedding(dicts['tgt'].size(),
deltalm_config.decoder_embed_dim,
padding_idx=constants.TGT_PAD)
decoder = DeltaLMDecoder(deltalm_config, embedding_tgt, opt=opt)
# share all embeddings
decoder.embed_tokens.weight = encoder.embed_tokens.weight
generators[0].linear.weight = encoder.embed_tokens.weight
if opt.freeze_embedding:
decoder.embed_tokens.weight.requires_grad = False
# generators[0].linear.bias.requires_grad = False
elif not opt.dec_pretrained_model:
print(" Decoder is not from pretrained model")
decoder = TransformerDecoder(opt, embedding_tgt, positional_encoder,
language_embeddings=language_embeddings)
        else:
            print("Warning: pretrained decoder type '%s' is not implemented" % opt.dec_pretrained_model)
            exit(-1)
if opt.load_from or not opt.dec_state_dict:
if opt.verbose:
print(" No weights loading from {} for decoder".format(opt.dec_pretrained_model))
elif opt.dec_pretrained_model:
print(" Loading weights for decoder from: \n", opt.dec_state_dict)
dec_model_state_dict = torch.load(opt.dec_state_dict, map_location="cpu")
if opt.dec_pretrained_model in ["m2m"]:
dec_model_state_dict["embed_positions.weights"] = decoder.embed_positions.weights
decoder.load_state_dict(dec_model_state_dict)
print("Done")
elif opt.dec_pretrained_model not in ["deltalm"]:
decoder.from_pretrained(state_dict=dec_model_state_dict,
model=decoder,
output_loading_info=opt.verbose,
model_prefix=opt.dec_pretrained_model
)
encoder.enc_pretrained_model = opt.enc_pretrained_model
decoder.dec_pretrained_model = opt.dec_pretrained_model
print(encoder.enc_pretrained_model, decoder.dec_pretrained_model)
encoder.input_type = opt.encoder_type
model = PretrainTransformer(encoder, decoder, nn.ModuleList(generators))
else:
raise NotImplementedError
if opt.tie_weights:
print("* Joining the weights of decoder input and output embeddings")
model.tie_weights()
return model
def init_model_parameters(model, opt):
"""
Initializing model parameters. Mostly using normal distribution (0, std)
"""
init_std = 0.02 # magical number
# opt.init something ...
def init_weight(weight):
if opt.init == 'normal':
if len(weight.shape) == 2:
std_ = math.sqrt(2.0 / (weight.shape[0] + weight.shape[1]))
nn.init.normal_(weight, 0.0, std_)
else:
nn.init.normal_(weight, 0.0, init_std)
elif opt.init == 'uniform':
if len(weight.shape) == 2:
nn.init.xavier_uniform_(weight)
else:
nn.init.uniform_(weight, -init_std, init_std)
    def init_embed(weight, padding_idx=0):
        # The embedding is initialized as in "Attention is all you need" and the Adafactor paper
        std_ = opt.model_size ** -0.5 if not opt.rezero else 0.05
        if opt.init_embedding == 'normal':
            nn.init.normal_(weight, 0.0, std_)
        elif opt.init_embedding == 'fixed':
            nn.init.normal_(weight, 0.0, 0.01)
        else:  # uniform
            nn.init.uniform_(weight, -std_, std_)
        # don't uncomment the next lines:
        # for some reason normalizing the weights in fp16 doesn't work when the padding row is set to 0
        # if not opt.fix_norm_output_embedding:
        #     nn.init.constant_(weight[padding_idx], 0)
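        # Worked example (illustrative): with model_size = 512 and rezero off,
        # std_ = 512 ** -0.5 ~ 0.0442, so 'normal' draws from N(0, 0.0442 ** 2)
        # and the uniform fallback draws from U(-0.0442, 0.0442).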
def init_bias(bias):
nn.init.constant_(bias, 0.0)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
# pass
elif classname.find('Embedding') != -1:
initialize = True
if hasattr(m, "no_need_to_initialize"):
if m.no_need_to_initialize:
initialize = False
if initialize:
if hasattr(m, 'weight') and hasattr(m, 'padding_idx'):
init_embed(m.weight, m.padding_idx)
# nn.init.constant_(m.weight[m.padding_idx], 0.0)
elif classname.find('LayerNorm') != -1 or classname.find('FusedLayerNorm') != -1:
if hasattr(m, 'weight'):
# if opt.init == 'normal':
# nn.init.normal_(m.weight, 1.0, 0)
# else:
# nn.init.uniform_(m.weight, 1.0 - init_std, 1.0 + init_std)
nn.init.constant_(m.weight, 1.0)
if hasattr(m, 'bias') and m.bias is not None:
init_bias(m.bias)
# pass
elif classname.find('RelativeTransformerEncoder') != -1:
if hasattr(m, 'r_emb'):
init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
init_bias(m.r_bias)
elif classname.find('RelativeTransformerDecoder') != -1:
if hasattr(m, 'r_emb'):
init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
init_bias(m.r_bias)
elif classname.find('RelPartialLearnableMultiHeadAttn') != -1:
if hasattr(m, 'r_w_bias'):
init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
init_weight(m.r_r_bias)
elif classname.find('EncdecMultiheadAttn') != -1:
m.reset_parameters(init=opt.init)
elif classname.find('RelativeSelfMultiheadAttn') != -1:
m.reset_parameters(init=opt.init)
elif classname.find('PositionWiseFeedForward') != -1:
m.reset_parameters(init=opt.init)
if opt.model not in ["pretrain_transformer", "wav2vec2_transformer", "wav2vec2_bert", "wav2vec2"]:
print('[INFO] Initializing entire model parameters')
model.apply(weights_init)
elif opt.model in ['wav2vec2_transformer']:
print('[INFO] Initializing only decoder parameters')
model.decoder.apply(weights_init)
elif opt.model in ['wav2vec2_bert']:
print("[INFO] Both encoder and decoder are using pretrained weights")
# freeze the embedding parameters?
else:
if opt.enc_pretrained_model and not opt.dec_pretrained_model:
print('[INFO] Initializing only decoder parameters')
model.decoder.apply(weights_init)
if not opt.enc_pretrained_model and opt.dec_pretrained_model:
print('[INFO] Initializing only encoder parameters')
model.encoder.apply(weights_init)
if hasattr(model, 'decoder'):
if not opt.dec_pretrained_model:
model.decoder.word_lut.apply(weights_init)
else:
if hasattr(model, 'tgt_embedding'):
model.tgt_embedding.apply(weights_init)
if opt.multilingual_partitioned_weights:
factor_embeddings = model.encoder.factor_embeddings
# this embedding scheme avoids a large initial perplexity
# basically an on-off switch to start with
with torch.no_grad():
# factor_embeddings.weight.bernoulli_(0.5).mul_(-2).add_(1)
factor_embeddings.weight.uniform_(-1, 1)
return
def build_language_model(opt, dicts):
opt = backward_compatible(opt)
onmt.constants.layer_norm = opt.layer_norm
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.activation_layer = opt.activation_layer
onmt.constants.version = 1.0
onmt.constants.attention_out = opt.attention_out
onmt.constants.residual_type = opt.residual_type
from onmt.models.transformer_xl import TransformerXL
embedding_tgt = nn.Embedding(dicts['tgt'].size(),
opt.model_size,
padding_idx=onmt.constants.TGT_PAD)
if opt.use_language_embedding:
print("* Create language embeddings with %d languages" % len(dicts['langs']))
language_embeddings = nn.Embedding(len(dicts['langs']), opt.model_size)
else:
language_embeddings = None
generators = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['tgt'].size())]
model = TransformerXL(opt, embedding_tgt, nn.ModuleList(generators), language_embeddings=language_embeddings)
model.tgt_dict = dicts['tgt']
if opt.tie_weights:
print("* Joining the weights of decoder input and output embeddings")
model.tie_weights()
return model
def build_fusion(opt, dicts):
# the fusion model requires a pretrained language model
print("Loading pre-trained language model from %s" % opt.lm_checkpoint)
lm_checkpoint = torch.load(opt.lm_checkpoint, map_location=lambda storage, loc: storage)
# first we build the lm model and lm checkpoint
lm_opt = lm_checkpoint['opt']
lm_model = build_language_model(lm_opt, dicts)
# load parameter for pretrained model
lm_model.load_state_dict(lm_checkpoint['model'])
# main model for seq2seq (translation, asr)
tm_model = build_tm_model(opt, dicts)
from onmt.legacy.FusionNetwork.Models import FusionNetwork
model = FusionNetwork(tm_model, lm_model)
return model
def optimize_model(model, fp16=True, distributed=False):
"""
Used to potentially upgrade the components with more optimized counterparts in the future
"""
def replace_layer_norm(m, name):
replacable = True
try:
# from apex.normalization.fused_layer_norm import FusedLayerNorm
import importlib
from apex.normalization.fused_layer_norm import FusedLayerNorm
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
except ModuleNotFoundError:
replacable = False
if replacable:
for attr_str in dir(m):
target_attr = getattr(m, attr_str)
if type(target_attr) == torch.nn.LayerNorm:
setattr(m, attr_str, FusedLayerNorm(target_attr.normalized_shape,
eps=target_attr.eps,
elementwise_affine=target_attr.elementwise_affine))
for n, ch in m.named_children():
replace_layer_norm(ch, n)
def convert_fast_attention(m, name):
def convert(m_):
classname = m_.__class__.__name__
if classname.find('MultiheadAttention') != -1:
m_.convert_fast_attention()
elif classname.find('MBartAttention') != -1:
m_.convert_fast_attention()
elif classname.find('MBartCrossAttention') != -1:
m_.convert_fast_attention()
m.apply(convert)
convert_fast_attention(model, "Transformer")
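# Illustrative usage (the surrounding training script is an assumption):
#
#   model = build_model(opt, dicts)
#   optimize_model(model)  # converts attention modules to their fast implementations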
def optimize_model_test(model):
"""
Used to potentially upgrade the components with more optimized counterparts in the future
"""
pass
def freeze_model_specialized_weights(model):
def freeze(m):
classname = m.__class__.__name__
if classname in ['MFWPositionWiseFeedForward',
"MFWEncdecMultiheadAttn",
"MFWRelativeSelfMultiheadAttn"]:
m.freeze()
model.apply(freeze)
return
def unfreeze_model_specialized_weights(model):
def unfreeze(m):
classname = m.__class__.__name__
if classname in ['MFWPositionWiseFeedForward',
"MFWEncdecMultiheadAttn",
"MFWRelativeSelfMultiheadAttn"]:
m.unfreeze()
model.apply(unfreeze)
return
| 41,066
| 44.277839
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/Dict.py
|
import torch
import math
import random, string
from multiprocessing import Pool
from collections import Counter
import os
from onmt.utils import safe_readline
class Dict(object):
def __init__(self, data=None, lower=False):
self.idxToLabel = {}
self.labelToIdx = {}
self.frequencies = {}
self.lower = lower
self.vocab_mask = None
# Special entries will not be pruned.
self.special = []
if data is not None:
if type(data) == str:
self.loadFile(data)
else:
self.addSpecials(data)
def size(self):
return len(self.idxToLabel)
    def loadFile(self, filename):
        "Load entries from a file."
        with open(filename) as f:
            for line in f:
                # NOTE: a vocab entry might itself be a space,
                # so we split on the right-most space in the line:
                # the left part is the label, the right part is the index
                right_space_idx = line.rfind(' ')
                label = line[:right_space_idx]
                idx = int(line[right_space_idx + 1:])
                # print(label, idx)
                self.add(label, None)
    def writeFile(self, filename):
        "Write entries to a file."
        with open(filename, 'w') as file:
            for i in range(self.size()):
                label = self.idxToLabel[i]
                file.write('%s %d\n' % (label, i))
def lookup(self, key, default=None):
key = key.lower() if self.lower else key
try:
return self.labelToIdx[key]
except KeyError:
return default
def getLabel(self, idx, default=None):
try:
return self.idxToLabel[idx]
except KeyError:
return default
def addSpecial(self, label, idx=None):
"Mark this `label` and `idx` as special (i.e. will not be pruned)."
idx = self.add(label, idx)
self.special += [idx]
def addSpecials(self, labels):
"Mark all labels in `labels` as specials (i.e. will not be pruned)."
for label in labels:
self.addSpecial(label)
def add(self, label, idx=None, num=1):
"Add `label` in the dictionary. Use `idx` as its index if given."
label = label.lower() if self.lower else label
if idx is not None:
self.idxToLabel[idx] = label
self.labelToIdx[label] = idx
else:
if label in self.labelToIdx:
idx = self.labelToIdx[label]
else:
idx = len(self.idxToLabel)
self.idxToLabel[idx] = label
self.labelToIdx[label] = idx
if idx not in self.frequencies:
self.frequencies[idx] = num
else:
self.frequencies[idx] += num
return idx
def prune(self, size):
"Return a new dictionary with the `size` most frequent entries."
if size >= self.size():
return self
# Only keep the `size` most frequent entries.
freq = torch.Tensor(
[self.frequencies[i] for i in range(len(self.frequencies))])
_, idx = torch.sort(freq, 0, True)
newDict = Dict()
newDict.lower = self.lower
count = 0
# Add special entries in all cases.
for i in self.special:
newDict.addSpecial(self.idxToLabel[i])
count = count + 1
for i in idx.tolist():
newDict.add(self.idxToLabel[i])
count = count + 1
if count >= size:
break
return newDict
def convertToIdx(self, labels, unkWord, bos_word=None, eos_word=None, type='int64'):
"""
Convert `labels` to indices. Use `unkWord` if not found.
        Optionally insert `bos_word` at the beginning and `eos_word` at the end.
"""
vec = []
if bos_word is not None:
vec += [self.lookup(bos_word)]
unk = self.lookup(unkWord)
for label in labels:
vec.append(self.lookup(label, default=unk))
# vec += [self.lookup(label, default=unk) for label in labels]
if eos_word is not None:
vec += [self.lookup(eos_word)]
if type == 'int64':
try:
return torch.LongTensor(vec)
except TypeError as e:
print("Type Error", e)
print(labels)
print(vec)
exit()
elif type == 'int32' or type == 'int':
return torch.IntTensor(vec)
elif type == 'int16':
return torch.ShortTensor(vec)
else:
raise NotImplementedError
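    # Illustrative usage sketch (not part of the original file), assuming the
    # four standard special tokens are registered first:
    #   d = Dict(['<blank>', '<unk>', '<s>', '</s>'])    # indices 0..3
    #   d.add('hello'); d.add('world')                   # indices 4, 5
    #   d.convertToIdx('hello there'.split(), '<unk>', bos_word='<s>', eos_word='</s>')
    #   -> tensor([2, 4, 1, 3])  # <s>, hello, <unk> (for OOV 'there'), </s>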
def convertToIdx2(self, labels, unkWord, bos_word=None, eos_word=None):
"""
Convert `labels` to indices. Use `unkWord` if not found.
        Optionally insert `bos_word` at the beginning and `eos_word` at the end.
"""
vec = []
if bos_word is not None:
vec += [self.lookup(bos_word)]
unk = self.lookup(unkWord)
vec += [self.lookup(label, default=unk) for label in labels]
if eos_word is not None:
vec += [self.lookup(eos_word)]
return torch.LongTensor(vec)
def convertToLabels(self, idx, stop, including_stop=True):
"""
Convert `idx` to labels.
If index `stop` is reached, convert it and return.
"""
labels = []
for i in idx:
if not including_stop:
if i == stop:
break
word = self.getLabel(int(i))
labels += [word]
if i == stop:
break
return labels
    # Pad the vocabulary with dummy entries so that its size divides evenly by
    # the multiplier, which helps computation with tensor cores.
    # This may interact badly with label smoothing, so the padded entries are
    # tracked in vocab_mask and can be masked out.
def patch(self, multiplier=8):
size = self.size()
original_size = size
# number of words to be patched
n_words = (math.ceil(size / multiplier) * multiplier) - size
        for i in range(n_words):
            while True:
                l_ = 6
                random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(l_))
                if random_string in self.labelToIdx:
                    continue
                else:
                    idx = self.add(random_string)
                    # frequencies are keyed by index, not label
                    self.frequencies[idx] = 0
                    break
# All elements should be false (so masked_fill_ won't touch)
# The new elements added should be True
self.vocab_mask = torch.BoolTensor(self.size()).fill_(True)
self.vocab_mask.narrow(0, 0, original_size).fill_(False)
print("Vocabulary size after patching: %d" % self.size())
@staticmethod
def count_file(filename, tokenizer, worker_id=0, num_workers=1):
counter = Counter()
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
count = 0
while line:
tokenized_words = tokenizer.tokenize(line)
for word in tokenized_words:
counter.update([word])
if f.tell() > end:
break
line = f.readline()
count += 1
if count % 100000 == 0:
print("[INFO] Thread %d processed %d lines." % (worker_id, count))
return counter
@staticmethod
def gen_dict_from_file(filename, dict, tokenizer, num_workers):
def merge_result(counter):
for w, c in sorted(counter.items()):
# dict.add_symbol(w, c)
dict.add(w, num=c)
if num_workers > 1:
pool = Pool(processes = num_workers)
results = []
for worker_id in range(num_workers):
results.append(pool.apply_async(
Dict.count_file,
(filename, tokenizer, worker_id, num_workers)
))
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
counts = Dict.count_file(filename, tokenizer)
merge_result(counts)
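# Illustrative sketch (not part of the original file): building and pruning a
# vocabulary from a corpus. `_WhitespaceTokenizer` is a hypothetical stand-in;
# any object exposing `tokenize(line) -> list[str]` works with count_file.
class _WhitespaceTokenizer:
    def tokenize(self, line):
        return line.strip().split()

def _example_build_dict(corpus_path="corpus.txt"):
    d = Dict()
    Dict.gen_dict_from_file(corpus_path, d, _WhitespaceTokenizer(), num_workers=1)
    return d.prune(32000)  # keep the 32k most frequent entries (plus specials)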
| 8,644
| 28.810345
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/logging.py
|
# coding=utf-8
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities. """
import logging
import os
import sys
import threading
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
"""
If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is
not - fall back to `_default_log_level`
"""
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
library_root_logger = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
_default_handler = None
def get_log_levels_dict():
return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom transformers module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
def get_verbosity() -> int:
"""
Return the current level for the 🤗 Transformers's root logger as an int.
Returns:
`int`: The logging level.
<Tip>
🤗 Transformers has following logging levels:
- 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
- 40: `transformers.logging.ERROR`
- 30: `transformers.logging.WARNING` or `transformers.logging.WARN`
- 20: `transformers.logging.INFO`
- 10: `transformers.logging.DEBUG`
</Tip>"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""
Set the verbosity level for the 🤗 Transformers's root logger.
Args:
verbosity (`int`):
Logging level, e.g., one of:
- `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
- `transformers.logging.ERROR`
- `transformers.logging.WARNING` or `transformers.logging.WARN`
- `transformers.logging.INFO`
- `transformers.logging.DEBUG`
"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the verbosity to the `INFO` level."""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the verbosity to the `WARNING` level."""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the verbosity to the `DEBUG` level."""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the verbosity to the `ERROR` level."""
return set_verbosity(ERROR)
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
prevent double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
::
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
handler.setFormatter(formatter)
def reset_format() -> None:
"""
Resets the formatting for HuggingFace Transformers's loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
"""
no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
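# Illustrative sketch (not part of the original file): typical use of this
# module. `get_logger(__name__)` returns a child of the library root logger,
# so the verbosity configured here applies to it.
def _example_logging_usage():
    set_verbosity_info()  # equivalent to set_verbosity(INFO)
    logger = get_logger(__name__)
    logger.info("visible at INFO level")
    # suppressed entirely when TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set
    logger.warning_advice("advisory warning")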
| 8,027
| 27.671429
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/utils.py
|
import logging, traceback
import os, re
import torch
import torchaudio
import math
import soundfile as sf
import torch.nn.functional as F
# this function is borrowed from Facebook
# avoid jumping into the middle of a character
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
def pad_tensor(x, min_length=2400):
if x.size(0) < min_length:
x_ = x.new_zeros((min_length, x.size(1)))
x_[:x.size(0), :] = x
x = x_
return x
# this function reads wav file based on the timestamp in seconds
def safe_readaudio(wav_path, start=0.0, end=0.0, sample_rate=16000):
offset = math.floor(sample_rate * start)
num_frames = -1 if end <= start else math.ceil(sample_rate * (end - start))
# by default torchaudio normalizes the read tensor
tensor, _ = torchaudio.load(wav_path, frame_offset=offset, num_frames=num_frames,
normalize=True, channels_first=False)
tensor = tensor[:, 0].unsqueeze(1)
# tensor has size [length, num_channel] in which channel should be 1 for wav2vec
return tensor
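# Illustrative sketch (not part of the original file): read a 2-second window
# starting at 0.5s from a hypothetical wav file, then zero-pad very short
# segments; the result keeps the [length, 1] shape expected downstream.
def _example_read_window(wav_path="utterance.wav"):
    tensor = safe_readaudio(wav_path, start=0.5, end=2.5, sample_rate=16000)
    return pad_tensor(tensor, min_length=2400)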
# this function is borrowed from fairseq
# https://github.com/pytorch/fairseq/blob/master/fairseq/utils.py
def checkpoint_paths(path, pattern=r'model_ppl_(\d+).(\d+)\_e(\d+).(\d+).pt'):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = []
# remove directories or files that don't contain "ppl"
for fname in os.listdir(path):
cur_path = os.path.join(path, fname)
if os.path.isdir(cur_path):
continue
elif "ppl" in fname:
files.append(fname)
    # sort by perplexity (ascending)
files = sorted(files, key=lambda s: float(s.split("_")[2]))
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
# return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
return [os.path.join(path, x[1]) for x in entries]
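# Illustrative sketch (not part of the original file): keep only the `keep`
# best checkpoints (lowest perplexity) in a model directory.
def _example_prune_checkpoints(model_dir="models/", keep=5):
    existing = checkpoint_paths(model_dir)  # sorted by perplexity, best first
    for path in existing[keep:]:
        os.remove(path)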
def normalize_gradients(parameters, denom=1.0):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
for p in parameters:
p.grad.detach().div_(denom)
return
# flip a tensor on certain dimension
def flip(x, dim=0):
dim = x.dim() + dim if dim < 0 else dim
inds = tuple(slice(None, None) if i != dim
else x.new(torch.arange(x.size(i) - 1, -1, -1).tolist()).long()
for i in range(x.dim()))
return x[inds]
# Stochastic expected length
def expected_length(length, death_rate):
e_length = 0
for l in range(length):
survival_rate = 1.0 - (l + 1) / length * death_rate
e_length += survival_rate
return e_length
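# Worked example (illustrative): with length=6 layers and death_rate=0.5 the
# survival rates are 1 - (l+1)/6 * 0.5 for l = 0..5, i.e. 11/12, 10/12, ...,
# 6/12, so expected_length(6, 0.5) = (11+10+9+8+7+6)/12 = 51/12 = 4.25 layers.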
from typing import Union, Iterable
try:
from torch._six import inf
except ModuleNotFoundError:
from torch import inf
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
def clip_grad_norm(
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
error_if_nonfinite: bool = False) -> torch.Tensor:
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Args:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
error_if_nonfinite (bool): if True, an error is thrown if the total
norm of the gradients from :attr:``parameters`` is ``nan``,
``inf``, or ``-inf``. Default: False (will switch to True in the future)
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
max_norm = float(max_norm)
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
norms = [p.grad.detach().abs().max().to(device) for p in parameters]
total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
raise RuntimeError(
f'The total norm of order {norm_type} for gradients from '
'`parameters` is non-finite, so it cannot be clipped. To disable '
'this error and scale the gradients by the non-finite norm anyway, '
'set `error_if_nonfinite=False`')
if max_norm > 0:
clip_coef = max_norm / (total_norm + 1e-6)
# Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so
# avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization
# when the gradients do not reside in CPU memory.
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
for p in parameters:
p.grad.detach().mul_(clip_coef_clamped.to(p.grad.device))
return total_norm
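# Illustrative sketch (not part of the original file): one optimizer step with
# gradient clipping; `model`, `loss` and `optimizer` are assumed to exist.
def _example_clip_step(model, loss, optimizer, max_norm=5.0):
    optimizer.zero_grad()
    loss.backward()
    total_norm = clip_grad_norm(model.parameters(), max_norm)
    optimizer.step()
    return total_norm  # pre-clipping gradient norm, useful for logging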
class IndexFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, input, indices):
# ctx.save_for_backward(indices)
ctx.first_axis_dim = input.shape[0]
assert input.ndim == 2
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# return input[indices]
d = input.shape[1]
        # build an (n, d) index in which every row k is filled with indices[k]
        repeated_indices = indices.unsqueeze(1).expand(indices.size(0), d)
ctx.save_for_backward(repeated_indices)
return torch.gather(input, 0, repeated_indices)
@staticmethod
def backward(ctx, grad_output):
# indices, = ctx.saved_tensors
grad_input = torch.zeros([ctx.first_axis_dim, *grad_output.shape[1:]],
device=grad_output.device, dtype=grad_output.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# grad_input[indices] = grad_output
repeated_indices, = ctx.saved_tensors
grad_input.scatter_(0, repeated_indices, grad_output)
return grad_input, None
index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
@staticmethod
def forward(ctx, values, indices, first_axis_dim):
# ctx.save_for_backward(indices)
assert indices.ndim == 1
assert values.ndim == 2
output = torch.zeros(first_axis_dim, values.shape[1], device=values.device,
dtype=values.dtype)
# TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
# output[indices] = values
d = values.shape[1]
        # build an (n, d) index in which every row k is filled with indices[k]
        repeated_indices = indices.unsqueeze(1).expand(indices.size(0), d)
ctx.save_for_backward(repeated_indices)
# output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
output.scatter_(0, repeated_indices, values)
return output
@staticmethod
def backward(ctx, grad_output):
repeated_indices, = ctx.saved_tensors
# TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
# grad_values = grad_output[indices]
# grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
grad_values = torch.gather(grad_output, 0, repeated_indices)
return grad_values, None, None
index_put_first_axis = IndexPutFirstAxis.apply
def unpad_input(hidden_states, indices):
"""
Arguments:
hidden_states: (batch, seqlen, dim)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
Return:
hidden_states: (total_nnz, dim), where total_nnz = number of tokens in selected in attention_mask.
cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
max_seqlen_in_batch: int
"""
# seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
# indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
# max_seqlen_in_batch = seqlens_in_batch.max().item()
# cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
# TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
# bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
# times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
# index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
# so we write custom forward and backward to make it a bit faster.
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
return index_first_axis(hidden_states, indices)
def pad_input(hidden_states, indices, batch, seqlen):
"""
Arguments:
hidden_states: (total_nnz, dim), where total_nnz = number of tokens in selected in attention_mask.
indices: (total_nnz)
Return:
hidden_states: (batch, seqlen, dim)
"""
# dim = hidden_states.shape[-1]
# output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
# output[indices] = hidden_states
output = index_put_first_axis(hidden_states, indices, batch * seqlen)
# return rearrange(output, '(b s) d -> b s d', b=batch)
output = output.view(batch, seqlen, output.size(-1))
return output
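# Illustrative round trip (not part of the original file): pack only the valid
# tokens, run any token-wise computation on the packed rows, then scatter back
# to the padded layout (padded positions come back as zeros).
def _example_unpad_roundtrip():
    batch, seqlen, dim = 2, 4, 8
    hidden = torch.randn(batch, seqlen, dim)
    mask = torch.tensor([[1, 1, 1, 0],
                         [1, 1, 0, 0]], dtype=torch.bool)
    indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()
    packed = unpad_input(hidden, indices)             # (5, dim)
    packed = packed * 2.0                             # token-wise computation
    return pad_input(packed, indices, batch, seqlen)  # (2, 4, dim)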
| 10,432
| 36.128114
| 128
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/markdown.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
class MarkdownHelpFormatter(argparse.HelpFormatter):
"""A really bare-bones argparse help formatter that generates valid markdown.
This will generate something like:
usage
# **section heading**:
## **--argument-one**
```
argument-one help text
```
"""
def _format_usage(self, usage, actions, groups, prefix):
usage_text = super(MarkdownHelpFormatter, self)._format_usage(
usage, actions, groups, prefix)
return '\n```\n%s\n```\n\n' % usage_text
def format_help(self):
self._root_section.heading = '# %s' % self._prog
return super(MarkdownHelpFormatter, self).format_help()
def start_section(self, heading):
super(MarkdownHelpFormatter, self).start_section('## **%s**' % heading)
def _format_action(self, action):
lines = []
action_header = self._format_action_invocation(action)
lines.append('### **%s** ' % action_header)
if action.help:
lines.append('')
lines.append('```')
help_text = self._expand_help(action)
lines.extend(self._split_lines(help_text, 80))
lines.append('```')
lines.extend(['', ''])
return '\n'.join(lines)
class MarkdownHelpAction(argparse.Action):
def __init__(self, option_strings,
dest=argparse.SUPPRESS, default=argparse.SUPPRESS,
**kwargs):
super(MarkdownHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser.formatter_class = MarkdownHelpFormatter
parser.print_help()
parser.exit()
def add_md_help_argument(parser):
parser.add_argument('-md', action=MarkdownHelpAction,
help='print Markdown-formatted help text and exit.')
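# Illustrative sketch (not part of the original file): wiring the Markdown
# help action into a parser. Running `python train.py -md` would then print
# Markdown-formatted help and exit.
def _example_markdown_parser():
    parser = argparse.ArgumentParser('train.py')
    add_md_help_argument(parser)
    parser.add_argument('-epochs', type=int, default=10, help='number of training epochs')
    return parser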
| 2,164
| 32.828125
| 81
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/online_translator.py
|
import onmt
import onmt.modules
from collections import defaultdict
try:
from mosestokenizer import MosesDetokenizer, MosesTokenizer
except ImportError:
# print("[WARNING] Moses tokenizer is not installed. Models with 'detokenize' option won't have Moses-detokenized outputs")
MosesDetokenizer = None
MosesTokenizer = None
class TranslatorParameter(object):
def __init__(self, filename):
self.model = ""
self.src = "<stdin>"
self.src_img_dir = ""
self.tgt = ""
self.output = "<stdout>"
self.beam_size = 1
self.batch_size = 1
self.max_sent_length = 512
self.min_sent_length = 1
self.dump_beam = ""
self.n_best = self.beam_size
self.replace_unk = False
self.gpu = -1
self.cuda = 0
self.verbose = False
self.normalize = True
self.beta = 0.0
self.alpha = 0.0
self.start_with_bos = False
self.fp16 = False
self.ensemble_op = 'mean'
self.autoencoder = None
self.encoder_type = 'text'
self.lm = None
self.src_lang = 'src'
self.tgt_lang = 'tgt'
self.bos_token = onmt.constants.BOS_WORD
self.sampling = False
self.attributes = None
self.no_bos_gold = False
self.no_repeat_ngram_size = 0
self.no_buffering = False
self.src_align_right = False
self.dynamic_quantile = 0
self.vocab_list = ""
self.sub_model = ""
self.sub_src = ""
self.ensemble_weight = ""
self.fast_translate = True
self.vocab_id_list = None # to be added if necessary
self.pretrained_classifier = None
self.detokenize = False
self.external_tokenizer = "facebook/mbart-large-50"
self.force_bos = False
self.use_tgt_lang_as_source = False
self.anti_prefix = ""
self.read_file(filename)
def read_file(self, filename):
f = open(filename)
line = f.readline()
while line:
w = line.strip().split()
if w[0] == "model":
self.model = w[1]
elif w[0] == "beam_size":
self.beam_size = int(w[1])
self.n_best = self.beam_size
elif w[0] == "src_lang":
self.src_lang = w[1]
elif w[0] == "tgt_lang":
self.tgt_lang = w[1]
elif w[0] == "no_repeat_ngram_size":
self.no_repeat_ngram_size = int(w[1])
elif w[0] == "dynamic_quantile":
self.dynamic_quantile = int(w[1])
elif w[0] == "fp16":
self.fp16 = True
elif w[0] == "gpu":
self.gpu = int(w[1])
self.cuda = True
elif w[0] == "detokenize":
self.detokenize = True
elif w[0] == "vocab_list":
self.vocab_list = w[1]
elif w[0] == "facebook/mbart-large-50":
self.external_tokenizer = w[1]
elif w[0] == "force_bos":
self.force_bos = True
elif w[0] == "use_tgt_lang_as_source":
self.use_tgt_lang_as_source = True
elif w[0] == "max_sent_length":
self.max_sent_length = int(w[1])
elif w[0] == "min_sent_length":
self.min_sent_length = int(w[1])
elif w[0] == "anti_prefix":
self.anti_prefix = w[1]
line = f.readline()
class RecognizerParameter(TranslatorParameter):
def __init__(self, filename):
super(RecognizerParameter, self).__init__(filename)
# Lazy version of this
self.src_lang = '<s>'
self.tgt_lang = '<s>'
self.bos_token = '<s>'
self.external_tokenizer = "facebook/mbart-large-50"
self.asr_format = "wav"
self.encoder_type = "audio"
class OnlineTranslator(object):
def __init__(self, model):
opt = TranslatorParameter(model)
from onmt.inference.fast_translator import FastTranslator
self.translator = FastTranslator(opt)
self.src_lang = "en"
self.tgt_lang = "en"
self.detokenize = opt.detokenize
self.external_tokenizer = opt.external_tokenizer
self.anti_prefix = opt.anti_prefix
# def translate(self, input):
# predBatch, predScore, predLength, goldScore, numGoldWords, allGoldScores = \
# self.translator.translate([input.split()], [])
#
# return " ".join(predBatch[0][0])
self.use_tgt_lang_as_source = opt.use_tgt_lang_as_source
def set_language(self, input_language, output_language, language_code_system="mbart50"):
# override the input_language
if self.use_tgt_lang_as_source:
input_language = output_language
        if language_code_system == "mbart50":
            language_map_dict = {"en": "en_XX", "de": "de_DE", "fr": "fr_XX", "es": "es_XX",
                                 "pt": "pt_XX", "it": "it_IT", "nl": "nl_XX", "None": "<s>",
                                 "ja": "ja_XX", "zh": "zh_CN", "vn": "vi_VN"}
        else:
            # no mapping defined for this code system: pass language codes through unchanged
            language_map_dict = {}
        input_lang = language_map_dict.get(input_language, input_language)
        output_lang = language_map_dict.get(output_language, output_language)
self.translator.change_language(new_src_lang=input_lang, new_tgt_lang=output_lang, use_srclang_as_bos=False)
self.src_lang = input_language
self.tgt_lang = output_language
def translate(self, input, prefix):
"""
Args:
prefix:
input: audio segment (torch.Tensor)
Returns:
"""
input = input.strip().split()
if self.detokenize:
prefixes = []
for _prefix in prefix:
if _prefix is not None:
with MosesTokenizer(self.tgt_lang) as tokenize:
__prefix = tokenize(_prefix)
__prefix = " ".join(__prefix)
_prefix = __prefix
prefixes.append(_prefix)
prefix = prefixes
# 2 lists because the translator is designed to run with 1 audio and potentially 1 text
src_batches = [[input]] # ... about the input
tgt_batch = []
sub_src_batch = []
past_src_batches = []
if all(v is None for v in prefix):
prefix = None
anti_prefix = self.anti_prefix if len(self.anti_prefix) > 0 else None
# perform beam search in the model
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = self.translator.translate(
src_batches, tgt_batch,
prefix=prefix, anti_prefix=anti_prefix)
# use the external sentencepiece model
external_tokenizer = self.translator.external_tokenizer
output_sentence = get_sentence_from_tokens(pred_batch[0][0], pred_ids[0][0], "word", external_tokenizer)
# here if we want to use mosestokenizer, probably we need to split the sentence AFTER the sentencepiece/bpe
# model applies their de-tokenization
if self.detokenize and MosesDetokenizer is not None:
output_sentence_parts = output_sentence.split()
with MosesDetokenizer(self.tgt_lang) as detokenize:
output_sentence = detokenize(output_sentence_parts)
return output_sentence
def translate_batch(self, inputs, prefixes):
"""
Args:
inputs: list of audio tensors
prefixes: list of prefixes
Returns:
"""
inputs = [_input.strip().split() for _input in inputs]
if self.detokenize:
new_prefixes = []
for _prefix in prefixes:
if _prefix is not None:
with MosesTokenizer(self.tgt_lang) as tokenize:
tokenized_sentence = tokenize(_prefix)
tokenized_sentence = " ".join(tokenized_sentence)
_prefix = tokenized_sentence
new_prefixes.append(_prefix)
prefixes = new_prefixes
        # 2 lists because the translator is designed to run with 1 audio and potentially 1 text
src_batches = [inputs] # ... about the input
tgt_batch = []
sub_src_batch = []
past_src_batches = []
if all(v is None for v in prefixes):
prefixes = None
anti_prefix = self.anti_prefix if len(self.anti_prefix) > 0 else None
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = self.translator.translate(
src_batches, tgt_batch,
prefix=prefixes, anti_prefix=anti_prefix)
external_tokenizer = self.translator.external_tokenizer
outputs = list()
for pred, pred_id in zip(pred_batch, pred_ids):
outputs.append(get_sentence_from_tokens(pred[0], pred_id[0], "word", external_tokenizer))
if self.detokenize and MosesDetokenizer is not None:
outputs_detok = []
for output_sentence in outputs:
# here if we want to use mosestokenizer, probably we need to split the sentence AFTER the sentencepiece/bpe
# model applies their de-tokenization
output_sentence_parts = output_sentence.split()
with MosesDetokenizer(self.tgt_lang) as detokenize:
output_sentence = detokenize(output_sentence_parts)
outputs_detok.append(output_sentence)
return outputs_detok
return outputs
# Checklist to integrate:
# 1. model file (model.averaged.pt)
# 2. the w2v and mbart50 config
# 3. mbart50 tokenizer
# 4. interface to translate with
def get_sentence_from_tokens(tokens, ids, input_type, external_tokenizer=None):
if external_tokenizer is None:
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
else:
sent = external_tokenizer.decode(ids, True, True).strip()
return sent
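# Illustrative sketch (not part of the original file): minimal text-to-text
# usage. "translator.conf" is a hypothetical key-value config file in the
# format read by TranslatorParameter (e.g. a line "model model.averaged.pt").
def _example_online_translation(config_path="translator.conf"):
    translator = OnlineTranslator(config_path)
    translator.set_language("en", "de", language_code_system="mbart50")
    # the input is expected to be pre-tokenized (e.g. sentencepiece pieces)
    return translator.translate("▁Hello ▁world", prefix=[None])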
class ASROnlineTranslator(object):
def __init__(self, model):
opt = RecognizerParameter(model)
from onmt.inference.fast_translator import FastTranslator
self.translator = FastTranslator(opt)
self.src_lang = "en"
self.tgt_lang = "en"
self.detokenize = opt.detokenize
self.anti_prefix = opt.anti_prefix
def set_language(self, input_language, output_language, language_code_system="mbart50"):
        if language_code_system == "mbart50":
            language_map_dict = {"en": "en_XX", "de": "de_DE", "fr": "fr_XX", "es": "es_XX",
                                 "pt": "pt_XX", "it": "it_IT", "nl": "nl_XX", "None": "<s>"}
        else:
            # no mapping defined for this code system: pass language codes through unchanged
            language_map_dict = {}
        input_lang = language_map_dict.get(input_language, input_language)
        output_lang = language_map_dict.get(output_language, output_language)
self.translator.change_language(new_src_lang=input_lang, new_tgt_lang=output_lang)
self.src_lang = input_language
self.tgt_lang = output_language
def translate(self, input, prefix):
"""
Args:
prefix:
input: audio segment (torch.Tensor)
Returns:
"""
if self.detokenize:
prefixes = []
for _prefix in prefix:
if _prefix is not None:
with MosesTokenizer(self.tgt_lang) as tokenize:
__prefix = tokenize(_prefix)
__prefix = " ".join(__prefix)
_prefix = __prefix
prefixes.append(_prefix)
prefix = prefixes
# 2 lists because the translator is designed to run with 1 audio and potentially 1 text
src_batches = [[input]] # ... about the input
tgt_batch = []
sub_src_batch = []
past_src_batches = []
anti_prefix = self.anti_prefix if len(self.anti_prefix) > 0 else None
print("anti prefix:", anti_prefix)
# perform beam search in the model
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = self.translator.translate(
src_batches, tgt_batch, type='asr',
prefix=prefix, anti_prefix=anti_prefix)
# use the external sentencepiece model
external_tokenizer = self.translator.external_tokenizer
output_sentence = get_sentence_from_tokens(pred_batch[0][0], pred_ids[0][0], "word", external_tokenizer)
# here if we want to use mosestokenizer, probably we need to split the sentence AFTER the sentencepiece/bpe
# model applies their de-tokenization
if self.detokenize and MosesDetokenizer is not None:
output_sentence_parts = output_sentence.split()
with MosesDetokenizer(self.tgt_lang) as detokenize:
output_sentence = detokenize(output_sentence_parts)
print(pred_ids[0][0], output_sentence)
return output_sentence
def translate_batch(self, inputs, prefixes):
"""
Args:
inputs: list of audio tensors
prefixes: list of prefixes
Returns:
"""
if self.detokenize:
new_prefixes = []
for _prefix in prefixes:
if _prefix is not None:
with MosesTokenizer(self.tgt_lang) as tokenize:
tokenized_sentence = tokenize(_prefix)
tokenized_sentence = " ".join(tokenized_sentence)
_prefix = tokenized_sentence
new_prefixes.append(_prefix)
prefixes = new_prefixes
        # 2 lists because the translator is designed to run with 1 audio and potentially 1 text
src_batches = [inputs] # ... about the input
tgt_batch = []
sub_src_batch = []
past_src_batches = []
# pred_score, pred_length, gold_score, num_gold_words, all_gold_scores = self.translator.translate(
# src_batches, tgt_batch,
# type='asr',
# prefix=prefix)
anti_prefix = self.anti_prefix if len(self.anti_prefix) > 0 else None
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = self.translator.translate(
src_batches, tgt_batch, type='asr',
prefix=prefixes, anti_prefix=anti_prefix)
external_tokenizer = self.translator.external_tokenizer
outputs = list()
for pred, pred_id in zip(pred_batch, pred_ids):
outputs.append(get_sentence_from_tokens(pred[0], pred_id[0], "word", external_tokenizer))
if self.detokenize and MosesDetokenizer is not None:
outputs_detok = []
for output_sentence in outputs:
# here if we want to use mosestokenizer, probably we need to split the sentence AFTER the sentencepiece/bpe
# model applies their de-tokenization
output_sentence_parts = output_sentence.split()
with MosesDetokenizer(self.tgt_lang) as detokenize:
output_sentence = detokenize(output_sentence_parts)
outputs_detok.append(output_sentence)
return outputs_detok
print(pred, outputs)
return outputs
| 15,658
| 33.339912
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/__init__.py
|
import onmt.constants
from onmt.inference.translator import Translator
from onmt.Rescorer import Rescorer
from onmt.online_translator import OnlineTranslator
from onmt.data.dataset import Dataset
from onmt.data.stream_dataset import StreamDataset
from onmt.optim import Optim
from onmt.Dict import Dict as Dict
from onmt.inference.Beam import Beam
from onmt.data.tokenizer import Tokenizer
# For flake8 compatibility.
__all__ = [onmt.constants, Translator, Rescorer, OnlineTranslator, Dataset, StreamDataset, Optim, Dict, Beam, Tokenizer]
| 540
| 37.642857
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/bayesian_factory.py
|
import torch
import torch.nn as nn
import onmt
from onmt.models.bayes_by_backprop.relative_transformer import \
RelativeTransformerEncoder, RelativeTransformerDecoder, BayesianTransformer
from onmt.models.transformer_layers import PositionalEncoding
from onmt.modules.copy_generator import CopyGenerator
from options import backward_compatible
init = torch.nn.init
MAX_LEN = onmt.constants.max_position_length # This should be the longest sentence from the dataset
def build_model(opt, dicts):
opt = backward_compatible(opt)
onmt.constants.layer_norm = opt.layer_norm
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.activation_layer = opt.activation_layer
onmt.constants.version = 1.0
onmt.constants.attention_out = opt.attention_out
onmt.constants.residual_type = opt.residual_type
if not opt.fusion:
model = build_tm_model(opt, dicts)
    else:
        # fusion models are not supported by this factory
        raise NotImplementedError
return model
def build_tm_model(opt, dicts):
onmt.constants.neg_log_sigma1 = opt.neg_log_sigma1
onmt.constants.neg_log_sigma2 = opt.neg_log_sigma2
onmt.constants.prior_pi = opt.prior_pi
# BUILD POSITIONAL ENCODING
if opt.time == 'positional_encoding':
positional_encoder = PositionalEncoding(opt.model_size, len_max=MAX_LEN)
else:
raise NotImplementedError
# BUILD GENERATOR
if opt.copy_generator:
generators = [CopyGenerator(opt.model_size, dicts['tgt'].size(),
fix_norm=opt.fix_norm_output_embedding)]
else:
generators = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['tgt'].size(),
fix_norm=opt.fix_norm_output_embedding)]
# BUILD EMBEDDINGS
if 'src' in dicts:
embedding_src = nn.Embedding(dicts['src'].size(),
opt.model_size,
padding_idx=onmt.constants.PAD)
else:
embedding_src = None
if opt.join_embedding and embedding_src is not None:
embedding_tgt = embedding_src
print("* Joining the weights of encoder and decoder word embeddings")
else:
embedding_tgt = nn.Embedding(dicts['tgt'].size(),
opt.model_size,
padding_idx=onmt.constants.PAD)
if opt.use_language_embedding:
print("* Create language embeddings with %d languages" % len(dicts['langs']))
language_embeddings = nn.Embedding(len(dicts['langs']), opt.model_size)
else:
language_embeddings = None
if opt.encoder_type == "text":
encoder = RelativeTransformerEncoder(opt, embedding_src, None,
opt.encoder_type, language_embeddings=language_embeddings)
if opt.encoder_type == "audio":
# raise NotImplementedError
encoder = RelativeTransformerEncoder(opt, None, None, encoder_type=opt.encoder_type,
language_embeddings=language_embeddings)
generator = nn.ModuleList(generators)
decoder = RelativeTransformerDecoder(opt, embedding_tgt, None, language_embeddings=language_embeddings)
if opt.reconstruct:
rev_decoder = RelativeTransformerDecoder(opt, embedding_src, None, language_embeddings=language_embeddings)
rev_generator = [onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['src'].size(),
fix_norm=opt.fix_norm_output_embedding)]
rev_generator = nn.ModuleList(rev_generator)
else:
rev_decoder = None
rev_generator = None
model = BayesianTransformer(encoder, decoder, generator, rev_decoder, rev_generator, mirror=opt.mirror_loss)
if opt.tie_weights:
print("* Joining the weights of decoder input and output embeddings")
model.tie_weights()
return model
| 4,008
| 37.92233
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/identity.py
|
import torch
from torch import Tensor
import torch.nn as nn
class Identity(torch.nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Examples::
        >>> m = Identity(54, unused_argument1=0.1, unused_argument2=False)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 20])
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input: Tensor, *args, **kwargs) -> Tensor:
return input
| 665
| 23.666667
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/checkpoint.py
|
import torch
import warnings
from torch.utils.checkpoint import get_device_states, set_device_states, check_backward_validity
def detach_variable(inputs):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
x = inp.detach()
x.requires_grad = inp.requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
class CheckpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, run_function, preserve_rng_state, *args):
check_backward_validity(args)
ctx.run_function = run_function
ctx.preserve_rng_state = preserve_rng_state
ctx.had_autocast_in_fwd = torch.is_autocast_enabled()
ctx.input_tensors = list(args)
if preserve_rng_state:
ctx.fwd_cpu_state = torch.get_rng_state()
# Don't eagerly initialize the cuda context by accident.
# (If the user intends that the context is initialized later, within their
# run_function, we SHOULD actually stash the cuda state here. Unfortunately,
# we have no way to anticipate this will happen before we run the function.)
ctx.had_cuda_in_fwd = False
if torch.cuda._initialized:
ctx.had_cuda_in_fwd = True
ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args)
# ctx.save_for_backward(*args)
with torch.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
@staticmethod
def backward(ctx, *output_grads):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
require_grad_indices = list()
non_grad_indices = list()
for i in range(len(ctx.input_tensors)):
temp = ctx.input_tensors[i]
ctx.input_tensors[i] = temp.detach()
ctx.input_tensors[i].requires_grad = temp.requires_grad # temp.requires_grad
# require_grad_list[i] = temp.requires_grad
if temp.requires_grad:
require_grad_indices.append(i)
else:
non_grad_indices.append(i)
# Stash the surrounding rng state, and mimic the state that was
# present at this time during forward. Restore the surrounding state
# when we're done.
rng_devices = []
if ctx.preserve_rng_state and ctx.had_cuda_in_fwd:
rng_devices = ctx.fwd_gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
if ctx.preserve_rng_state:
torch.set_rng_state(ctx.fwd_cpu_state)
if ctx.had_cuda_in_fwd:
set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
with torch.enable_grad(), torch.cuda.amp.autocast(ctx.had_autocast_in_fwd):
output_tensors = ctx.run_function(*ctx.input_tensors)
# if isinstance(outputs, torch.Tensor):
# outputs = (outputs,)
# # run backward() with only tensor that requires grad
# outputs_with_grad = []
# args_with_grad = []
# for i in range(len(outputs)):
# if outputs[i].requires_grad:
# outputs_with_grad.append(outputs[i])
# args_with_grad.append(args[i])
# if len(outputs_with_grad) == 0:
# raise RuntimeError(
# "none of output has requires_grad=True,"
# " this checkpoint() is not necessary")
# torch.autograd.backward(outputs_with_grad, args_with_grad)
# grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
# for inp in detached_inputs)
input_tensors_with_grad = list()
for i in range(len(ctx.input_tensors)):
if i in require_grad_indices:
input_tensors_with_grad.append(ctx.input_tensors[i])
input_grads = torch.autograd.grad(output_tensors, input_tensors_with_grad, output_grads, allow_unused=True)
return_input_grads = list()
j = 0
for i in range(len(ctx.input_tensors)):
if i in require_grad_indices:
return_input_grads.append(input_grads[j])
j = j + 1
else:
return_input_grads.append(None)
return (None, None) + tuple(return_input_grads)
def checkpoint(function, *args, **kwargs):
r"""Checkpoint a model or part of the model
Checkpointing works by trading compute for memory. Rather than storing all
intermediate activations of the entire computation graph for computing
backward, the checkpointed part does **not** save intermediate activations,
and instead recomputes them in backward pass. It can be applied on any part
of a model.
Specifically, in the forward pass, :attr:`function` will run in
:func:`torch.no_grad` manner, i.e., not storing the intermediate
activations. Instead, the forward pass saves the inputs tuple and the
:attr:`function` parameter. In the backwards pass, the saved inputs and
:attr:`function` is retrieved, and the forward pass is computed on
:attr:`function` again, now tracking the intermediate activations, and then
the gradients are calculated using these activation values.
.. warning::
Checkpointing doesn't work with :func:`torch.autograd.grad`, but only
with :func:`torch.autograd.backward`.
.. warning::
If :attr:`function` invocation during backward does anything different
than the one during forward, e.g., due to some global variable, the
checkpointed version won't be equivalent, and unfortunately it can't be
detected.
.. warning::
If checkpointed segment contains tensors detached from the computational
graph by `detach()` or `torch.no_grad()`, the backward pass will raise an
error. This is because `checkpoint` makes all the outputs require
gradients which causes issues when a tensor is defined to have no
gradient in the model. To circumvent this, detach the tensors outside of
the `checkpoint` function.
.. warning:
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients. At least one of the outputs needs to have
:code:`requires_grad=True` as well.
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
preserve_rng_state(bool, optional, default=True): Omit stashing and restoring
the RNG state during each checkpoint.
args: tuple containing inputs to the :attr:`function`
Returns:
Output of running :attr:`function` on :attr:`*args`
"""
# Hack to mix *args with **kwargs in a python 2.7-compliant way
preserve = kwargs.pop('preserve_rng_state', True)
if kwargs:
raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs))
return CheckpointFunction.apply(function, preserve, *args)
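# Illustrative sketch (not part of the original file): checkpointing a single
# expensive block. At least one input must require grad so that gradients can
# flow through the recomputed segment.
def _example_checkpointed_block():
    layer = torch.nn.Linear(16, 16)
    x = torch.randn(4, 16, requires_grad=True)
    y = checkpoint(lambda inp: torch.relu(layer(inp)), x)
    y.sum().backward()  # the block's forward is recomputed here
    return x.grad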
| 7,659
| 43.277457
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/pre_post_processing.py
|
import torch
import torch.nn as nn
from .layer_norm import LayerNorm, MultilingualLayerNorm
import onmt
from onmt.modules.dropout import VariationalDropout
from onmt.modules.bottle import Bottle
# from onmt.modules.optimized.dropout_add import fused_dropout_add
class PrePostProcessing(nn.Module):
"""Applies processing to tensors
Args:
d_model: dimension of model
        dropout_p: dropout probability
sequence of processing steps:
n = normalization
d = dropout
a = adding previous input to output (residual)
"""
def __init__(self, d_model, dropout_p, sequence='nda', variational=False, elementwise_affine=True,
multilingual=False, n_languages=1):
super(PrePostProcessing, self).__init__()
self.d_model = d_model
self.dropout_p = dropout_p
self.multilingual = multilingual
self.variational = variational
self.steps = list(sequence)
#
# if onmt.constants.residual_type == 'gated':
# # gated residual
# # initialize k with one
# self.k = nn.Parameter(torch.ones(1))
if 'n' in self.steps:
if not multilingual:
ln = LayerNorm((self.d_model,), elementwise_affine=elementwise_affine)
self.layer_norm = Bottle(ln)
else:
ln = MultilingualLayerNorm((self.d_model,), eps=1e-5, elementwise_affine=True, n_languages=n_languages)
self.layer_norm = ln
if 'd' in self.steps:
if variational:
self.dropout = VariationalDropout(self.dropout_p, batch_first=False)
else:
self.dropout = nn.Dropout(self.dropout_p, inplace=False)
if 'z' in self.steps:
# Rezero residual method
self.g = nn.Parameter(torch.tensor(0.0))
def forward(self, tensor, input_tensor=None, mask=None, factor=None):
"""
:param tensor: input tensor [BxTxH] or [TxBxH (most likely)]
:param input_tensor: previous tensor for residual
:param mask: unused
:param factor: tensor size 1, for multilingual
:return:
"""
output = tensor
i = 0
while i < len(self.steps):
step = self.steps[i]
if step == 'n':
# this cast is needed for O1 and FusedLayerNorm
                if self.multilingual:
                    output = self.layer_norm(output, factor)
                else:
                    output = self.layer_norm(output)
if step == 'd':
output = self.dropout(output)
if step == 'a':
if input_tensor is not None:
output = output + input_tensor
if step == 'z': # rezero-residual but scaling the output with initially small g
output = output * self.g
if input_tensor is not None:
output = output + input_tensor
i = i + 1
return output
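# Illustrative sketch (not part of the original file): the usual pre-norm /
# post-norm pair around a sublayer, mirroring how the transformer layers in
# this repository use the class.
def _example_residual_block(sublayer, x, d_model=512, dropout=0.1):
    preprocess = PrePostProcessing(d_model, dropout, sequence='n')    # layer norm only
    postprocess = PrePostProcessing(d_model, dropout, sequence='da')  # dropout, then residual add
    return postprocess(sublayer(preprocess(x)), input_tensor=x)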
| 3,085
| 36.180723
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/rotary_postional_encodings.py
|
import torch
from torch import nn, einsum
# from einops import rearrange, repeat
class SinusoidalEmbeddings(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
# def forward(self, x=None, length=0, timestep=-1):
# """
# :param timestep:
# :param length:
# :param x: [time x bsz x hidden]
# :return:
# """
# # actually this module doesn't care about anything of x except x.size(1)
#
# if x is not None:
# assert length == 0 and timestep == -1
# n = x.shape[0] # time dimension
# elif length > 0:
# assert timestep == -1
# n = length
# elif timestep >= 0:
# n = timestep + 1
#
# t = torch.arange(n, device=self.inv_freq.device).type_as(self.inv_freq)
# sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
# emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
# return emb
def forward(self, x, seq_dim=1):
seq_len = x.shape[seq_dim]
if seq_len != self.seq_len_cached:
self.seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq)
freqs = torch.einsum('i,j->ij', t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            # the singleton axis depends on where the sequence dimension sits,
            # so that the cached tables broadcast correctly over the batch
            if seq_dim == 0:
                self.cos_cached = emb.cos()[:, None, :]
                self.sin_cached = emb.sin()[:, None, :]
            elif seq_dim == 1:
                self.cos_cached = emb.cos()[None, :, :]
                self.sin_cached = emb.sin()[None, :, :]
            else:
                raise NotImplementedError
return self.cos_cached, self.sin_cached
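# Illustrative sketch (not part of the original file): fetch the cached cos/sin
# tables for a [time x batch x head_dim] tensor (seq_dim=0). Applying them as a
# rotation (RoPE) is left to the attention modules.
def _example_rotary_tables():
    emb = SinusoidalEmbeddings(dim=64)
    q = torch.randn(37, 8, 64)    # time x batch x head_dim
    cos, sin = emb(q, seq_dim=0)  # each [37 x 1 x 64], broadcastable over batch
    return cos, sin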
| 2,003
| 35.436364
| 86
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/lru.py
|
import torch
import torch.nn as nn
import numpy as np
class LRU(nn.Module):
def __init__(self, H, N, reverse=False, r_min=0, r_max=1, max_phase=2 * np.pi):
super().__init__()
"""Initialize parameters of the LRU layer."""
# N: state dimension, H: model dimension
# Initialization of Lambda is complex valued distributed uniformly on ring
# between r_min and r_max, with phase in [0, max_phase].
u1 = torch.rand((N,)) # N
self.nu_log = nn.Parameter(torch.log(-0.5 * torch.log(u1 * (r_max ** 2 - r_min ** 2) + r_min ** 2))) # N
u2 = torch.rand((N,)) # N
self.theta_log = nn.Parameter(torch.log(max_phase * u2)) # N
# Glorot initialized Input/Output projection matrices
B = torch.rand((H, N)) / np.sqrt(2 * H) + 1j * torch.rand((H, N)) / np.sqrt(2 * H) # H x N
self.C = nn.Parameter(torch.rand((N, H)) / np.sqrt(N) + 1j * torch.rand((N, H)) / np.sqrt(N)) # N x H
        # Normalization factor gamma = sqrt(1 - |lambda|^2); B is scaled by
        # gamma itself, not by its log
        diag_lambda = torch.exp(-torch.exp(self.nu_log) + 1j * torch.exp(self.theta_log))  # N
        gamma = torch.sqrt(1 - torch.abs(diag_lambda) ** 2)  # N
        self.B = nn.Parameter(B * gamma)  # H x N
self.reverse = reverse
    def forward(self, u, lengths):
        """Forward pass of the LRU layer. Output sequence y and input sequence u are of shape (B, L, H)."""
        Lambda = torch.exp(-torch.exp(self.nu_log) + 1j * torch.exp(self.theta_log))  # N
        if not self.reverse:
            exp = torch.arange(-1, -u.shape[1] - 1, -1, dtype=torch.float32,
                               device=u.device).view(1, -1, 1).expand(u.shape[0], -1, -1)  # B x L x 1
            exp = exp + lengths.view(-1, 1, 1)  # B x L x 1
            exp.clamp_(min=0)
        else:
            exp = torch.arange(u.shape[1], dtype=torch.float32,
                               device=u.device).view(1, -1, 1).expand(u.shape[0], -1, -1)  # B x L x 1
        Lambda_exp = Lambda.pow(exp)  # B x L x N
        # Bu = torch.matmul(u.to(torch.complex32 if u.dtype==torch.float16 else torch.complex64), self.B)  # B x L x N
        Bu = torch.matmul(u, self.B.real) + 1j * torch.matmul(u, self.B.imag)  # B x L x N
        prod = Lambda_exp * Bu  # B x L x N
        # accumulate over the sequence dimension (dim 1), not the batch dimension
        x = prod.cumsum(1)  # B x L x N
# y = torch.matmul(x, self.C).real # B x L x H
y = torch.matmul(x.real, self.C.real) - torch.matmul(x.imag, self.C.imag) # B x L x H
return y
if __name__ == "__main__":
# import torch_directml
device = "cpu" # torch_directml.device()
B = 4
L = 1000
d_model = 1024
d_hidden = 1024
reverse = False
lengths = torch.randint(1, L, (B,))
layer = LRU(d_model, d_hidden, reverse=reverse).to(device)
seq = torch.randn(B, L, d_model, device=device)
print("START")
seq = layer(seq, lengths)
print(seq.mean(), seq.std())
| 2,938
| 37.168831
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/copy_generator.py
|
import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from onmt.modules.linear import XavierLinear
import math
import onmt
class CopyGenerator(nn.Module):
"""Generator module that additionally considers copying
words directly from the source.
The main idea is that we have an extended "dynamic dictionary".
It contains `|tgt_dict|` words plus an arbitrary number of
additional words introduced by the source sentence.
For each source sentence we have a `src_map` that maps
    each source word to an index in `tgt_dict` if it is known, or
    else to an extra word.
    The copy generator is an extended version of the standard
    generator that computes three values.
    * :math:`p_{softmax}` the standard softmax over `tgt_dict`
    * :math:`p(z)` the probability of instead copying a
      word from the source, computed using a Bernoulli
    * :math:`p_{copy}` the probability of copying a word,
      taken from the attention distribution directly.
    The model returns a distribution over the extended dictionary,
computed as
:math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
.. mermaid::
graph BT
A[input]
S[src_map]
B[softmax]
BB[switch]
C[attn]
D[copy]
O[output]
A --> B
A --> BB
S --> D
C --> D
D --> O
B --> O
BB --> O
Args:
input_size (int): size of input representation
tgt_dict (Vocab): output target dictionary
"""
def __init__(self, hidden_size, output_size, fix_norm=False):
super(CopyGenerator, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.must_softmax = True
# this constant is used to inverse the softmax function
self.c = 0.1712209
# gate for linear
self.linear_copy = XavierLinear(hidden_size, 1)
# we need a linear projector for attention
# self.linear_attn = XavierLinear(hidden_size, hidden_size)
self.linear = nn.Linear(hidden_size, output_size)
stdv = 1. / math.sqrt(self.linear.weight.size(1))
torch.nn.init.uniform_(self.linear.weight, -stdv, stdv)
self.fix_norm = fix_norm
def forward(self, model_outputs, log_softmax=True):
"""
"""
input = model_outputs['hidden']
context = model_outputs['context']
src = model_outputs['src']
fix_norm = self.fix_norm
tlen, bsz, hsize = input.size()
#
# batch_by_tlen, _ = input.size()
# batch_by_tlen_, src_len = attn.size()
# src_len_, batch, vocab_size = src_map.size()
""" Probability of copying p(z=1) batch. """
copy_prob = torch.sigmoid(self.linear_copy(input)) # T x B x 1
""" probabilities from the model output """
if not fix_norm:
logits = self.linear(input)
else:
normalized_weights = F.normalize(self.linear.weight, dim=-1)
normalized_bias = self.linear.bias
logits = F.linear(input, normalized_weights, normalized_bias)
prob = F.softmax(logits.float(), dim=-1, dtype=torch.float32)
p_g = torch.mul(prob, 1 - copy_prob) # tlen x B x V
""" probabilities from copy """
query = input.transpose(0, 1)
keys = context.transpose(0, 1) # B x slen x H
attn_score = torch.bmm(query, keys.transpose(1, 2)) # B x tlen x slen
src_mask = src.eq(onmt.constants.PAD).unsqueeze(1) # B x s_len
attn_score = attn_score.float().masked_fill_(src_mask, -float('inf')).type_as(attn_score)
attns = F.softmax(attn_score.float(), dim=-1) # B x tlen x slen
p_c = torch.mul(attns.transpose(0, 1), copy_prob) # tlen x B x slen
src_indices = src.unsqueeze(0).expand_as(p_c)
# add the probabilities into the positions directly
p_g.scatter_add_(2, src_indices, p_c)
# p_c = torch.bmm(mul_attn, src)
# mul_attn = torch.mul(attn, copy_prob.expand_as(attn)).view(-1, batch, slen) # tlen_, batch, src_len
# p_c = torch.bmm(mul_attn.transpose(0, 1),
# src_map.transpose(0, 1)).transpose(0, 1) # tlen, batch, vocab_size
# revert the softmax function to get logits
if log_softmax:
output = torch.log(p_g)
else:
output = p_g
model_outputs['logits'] = output
model_outputs['softmaxed'] = self.must_softmax
# the logits is then used in the normal loss function
return model_outputs
| 4,676
| 33.389706
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/bottle.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
"""
Class Bottle:
When working with masked tensors, bottling extracts the "true" (non-pad)
rows using the mask to avoid unnecessary computation.
"""
class Bottle(nn.Module):
def __init__(self, function):
super(Bottle, self).__init__()
self.function = function
def forward(self, input, mask=None):
"""
input: batch x time x hidden
mask: batch x time
        currently it's a no-op to remain fully compatible with fp16
"""
# revert to no-op for variational dropout
# print(input.type(), self.function.weight.data.type())
return self.function(input)
# # remember the original shape
# original_shape = input.size()
#
        # # flatten the tensor to 2D
# flattened_input = input.contiguous().view(-1, input.size(-1))
# flattened_size = flattened_input.size()
#
#
# dim = original_shape[-1]
#
# if mask is not None:
# flattened_mask = mask.view(-1)
#
# non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
#
# clean_input = flattened_input.index_select(0, non_pad_indices )
# else:
# clean_input = flattened_input
#
# # forward pass on the clean input only
# clean_output = self.function(clean_input)
#
# if mask is not None:
# # after that, scatter the output (the position where we don't scatter are masked zeros anyways)
# flattened_output = Variable(flattened_input.data.new(*flattened_size[:-1], clean_output.size(-1)).zero_())
# flattened_output.index_copy_(0, non_pad_indices, clean_output)
# else:
# flattened_output = clean_output
#
# # restore the tensor original size
# output = flattened_output.view(*original_shape[:-1], flattened_output.size(-1))
# return output
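# A minimal runnable sketch of the masked "bottling" kept above in
# comments, assuming mask == 1 marks real (non-pad) tokens: run the wrapped
# function only on unmasked rows, then scatter the results back. All names
# here are hypothetical and for illustration only.
def _bottle_sketch():
    fn = nn.Linear(4, 2)
    x = torch.randn(2, 3, 4)                       # batch x time x hidden
    mask = torch.tensor([[1, 1, 0], [1, 0, 0]])    # batch x time, 1 = keep
    flat_x = x.view(-1, x.size(-1))
    idx = torch.nonzero(mask.view(-1)).squeeze(1)
    clean_out = fn(flat_x.index_select(0, idx))    # forward on kept rows only
    out = flat_x.new_zeros(flat_x.size(0), clean_out.size(-1))
    out.index_copy_(0, idx, clean_out)             # pad rows stay zero
    return out.view(x.size(0), x.size(1), -1)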
| 2,044
| 30.953125
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/base_seq2seq.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt, math
from onmt.modules.optimized.linear import Linear, linear_function
class Generator(nn.Module):
def __init__(self, hidden_size, output_size, fix_norm=False):
super(Generator, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.linear = nn.Linear(hidden_size, output_size)
self.fix_norm = fix_norm
self.must_softmax = False
stdv = 1. / math.sqrt(self.linear.weight.size(1))
torch.nn.init.uniform_(self.linear.weight, -stdv, stdv)
self.linear.bias.data.zero_()
def forward(self, output_dicts):
"""
:param output_dicts: dictionary contains the outputs from the decoder
:return: logits (the elements before softmax)
"""
input = output_dicts['hidden']
fix_norm = self.fix_norm
target_mask = output_dicts['target_mask']
if not fix_norm:
logits = self.linear(input)
else:
normalized_weights = F.normalize(self.linear.weight, dim=-1)
normalized_bias = self.linear.bias
logits = F.linear(input, normalized_weights, normalized_bias)
# softmax will be done at the loss function
# output = F.log_softmax(logits, dim=-1, dtype=torch.float32)
output_dicts['logits'] = logits
output_dicts['softmaxed'] = self.must_softmax
return output_dicts
class NMTModel(nn.Module):
def __init__(self, encoder, decoder, generator=None, rec_decoder=None, rec_generator=None,
mirror=False, ctc=False):
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
self.rec_decoder = rec_decoder
self.rec_generator = rec_generator
self.ctc = ctc
if self.rec_decoder:
self.rec_decoder.word_lut.weight = self.encoder.word_lut.weight
self.reconstruct = True
else:
self.reconstruct = False
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.decoder.word_lut.weight
def share_enc_dec_embedding(self):
self.encoder.word_lut.weight = self.decoder.word_lut.weight
def mark_pretrained(self):
self.encoder.mark_pretrained()
self.decoder.mark_pretrained()
def load_state_dict(self, state_dict, strict=False):
"""
override this method to have back-compatibility
"""
def condition(param_name):
# don't load these buffers (more like a bug)
if 'positional_encoder' in param_name:
return False
if 'time_transformer' in param_name:
if self.encoder is not None:
if getattr(self.encoder, "enc_pretrained_model", None) or self.encoder.time == 'positional_encoding':
return False
if param_name == 'decoder.mask':
return False
if param_name == 'decoder.r_w_bias' or param_name == 'decoder.r_r_bias':
if param_name in model_dict:
return True
return False
return True
        # restore the old generator if necessary for loading
if "generator.linear.weight" in state_dict and type(self.generator) is nn.ModuleList:
self.generator = self.generator[0]
model_dict = self.state_dict()
# only load the filtered parameters
filtered = {k: v for k, v in state_dict.items() if condition(k)}
for k, v in model_dict.items():
if k not in filtered:
filtered[k] = v
# removing the keys in filtered but not in model dict
if strict:
removed_keys = list()
for k, v in filtered.items():
if k not in model_dict:
removed_keys.append(k)
for k in removed_keys:
filtered.pop(k)
# ctc weights can be ignored:
if 'ctc_linear.weight' not in model_dict and 'ctc_linear.weight' in filtered:
filtered.pop('ctc_linear.weight')
if 'ctc_linear.bias' in filtered:
filtered.pop('ctc_linear.bias')
super().load_state_dict(filtered)
# in case using multiple generators
if type(self.generator) is not nn.ModuleList:
self.generator = nn.ModuleList([self.generator])
def convert_autograd(self):
def attempt_to_convert(m):
if hasattr(m, 'convert_autograd'):
m.convert_autograd()
for n, ch in m.named_children():
attempt_to_convert(ch)
attempt_to_convert(self.encoder)
attempt_to_convert(self.decoder)
class Reconstructor(nn.Module):
"""
This class is currently unused, but can be used to learn to reconstruct from the hidden states
"""
def __init__(self, decoder, generator=None):
super(Reconstructor, self).__init__()
self.decoder = decoder
self.generator = generator
class DecoderState(object):
"""Interface for grouping together the current state of a recurrent
decoder. In the simplest case just represents the hidden state of
the model. But can also be used for implementing various forms of
input_feeding and non-recurrent models.
Modules need to implement this to utilize beam search decoding.
"""
def update_beam(self, beam, b, remaining_sents, idx):
raise NotImplementedError
def prune_complete_beam(self, active_idx, remaining_sents):
raise NotImplementedError
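# A minimal sketch of the weight tying performed by NMTModel.tie_weights
# above: the generator's output projection reuses the decoder embedding
# matrix, so both modules share a single Parameter and its gradient. Sizes
# below are toy values for illustration only.
def _tie_weights_sketch():
    vocab, hidden = 100, 16
    word_lut = nn.Embedding(vocab, hidden)        # decoder.word_lut
    projection = nn.Linear(hidden, vocab)         # generator.linear
    projection.weight = word_lut.weight           # both are [vocab x hidden]
    assert projection.weight.data_ptr() == word_lut.weight.data_ptr()
    return word_lut, projection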
| 5,861
| 32.497143
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/loss.py
|
import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
import onmt.modules
from onmt.utils import flip
def tiny_value_of_dtype(dtype: torch.dtype):
"""
Returns a moderately tiny value for a given PyTorch data type that is used to avoid numerical
issues such as division by zero.
    This is different from `info_value_of_dtype(dtype).tiny` because that value causes NaN bugs in some cases.
Only supports floating point dtypes.
"""
if not dtype.is_floating_point:
raise TypeError("Only supports floating point dtypes.")
if dtype == torch.float or dtype == torch.double:
return 1e-13
elif dtype == torch.half:
return 1e-4
else:
raise TypeError("Does not support dtype " + str(dtype))
class CrossEntropyLossBase(_Loss):
"""
    Class for managing efficient loss computation.
    Users can implement their own loss computation strategy by
    subclassing this one.
    Args:
        output_size: number of words in the vocabulary
"""
def __init__(self, output_size, label_smoothing, padding_idx=0, **kwargs):
super(CrossEntropyLossBase, self).__init__()
self.output_size = output_size
self.padding_idx = padding_idx
self.smoothing_value = label_smoothing / (output_size - 2) # pad and <s>
self.confidence = 1.0 - label_smoothing
self.label_smoothing = label_smoothing
# use apex fast entropy implementation
self.fast_xentropy = False
try:
import xentropy_cuda
from onmt.modules.optimized.softmax_xentropy import SoftmaxCrossEntropyLoss
self.softmax_xentropy = SoftmaxCrossEntropyLoss.apply
self.fast_xentropy = True
except (ModuleNotFoundError, AttributeError):
# print("[INFO] Fast xentropy cannot be found. Using PyTorch/Python based cross entropy loss.")
self.softmax_xentropy = None
self.fast_xentropy = False
def _compute_loss(self, logits, targets, vocab_mask=None, softmaxed=False):
"""
:param logits: T x B x V or B x T x V tensor (output of decoder)
        :param targets: T x B or B x T target tensor (word indices)
        :param vocab_mask: bool tensor of size V, or None
:return:
"""
label_smoothing = self.label_smoothing if self.training else 0.0
gtruth = targets.view(-1) # B*T
logits = logits.view(-1, logits.size(-1)) # B*T x V
eps_i = self.smoothing_value if self.training else 0.0
fast_entropy = self.fast_xentropy and not softmaxed
        go_to_slow_code = False  # flipped to True only when the fast paths are unavailable
if not softmaxed:
# Try the fastest softmax + loglikelihood implementation first
if fast_entropy:
half_to_float = (logits.dtype == torch.half)
loss = self.softmax_xentropy(logits, gtruth, label_smoothing, self.padding_idx, half_to_float)
# We need to return the loss data without masking bad positions
# Otherwise the values from "low" validation perplexities cannot be trusted
with torch.no_grad():
loss_data = loss.sum().data.item()
bad_loss = torch.logical_or(torch.isinf(loss), torch.isnan(loss))
if bad_loss.any():
loss.masked_fill_(bad_loss, 0)
loss = loss.sum()
else:
try:
                    # Otherwise back off to PyTorch (1.10+)
loss = F.cross_entropy(logits.float(), gtruth, weight=None,
ignore_index=self.padding_idx, reduction='none',
label_smoothing=label_smoothing)
with torch.no_grad():
loss_data = loss.sum().data.item()
bad_loss = torch.logical_or(torch.isinf(loss), torch.isnan(loss))
if bad_loss.any():
loss.masked_fill_(bad_loss, 0)
loss = loss.sum()
except AttributeError:
go_to_slow_code = True
else:
go_to_slow_code = True
        # Finally, fall back to the manual Python implementation
if go_to_slow_code:
if not softmaxed:
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
else:
lprobs = logits
non_pad_mask = gtruth.ne(self.padding_idx)
nll_loss = -lprobs.gather(1, gtruth.unsqueeze(1))[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask] if eps_i > 0 else None
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum() if eps_i > 0 else None
loss = (1. - label_smoothing) * nll_loss + eps_i * smooth_loss if eps_i > 0 else nll_loss
loss_data = loss.data.item()
return loss, loss_data
def forward(self, model_outputs, targets, hiddens, **kwargs):
        raise NotImplementedError
class NMTLossFunc(CrossEntropyLossBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, hidden_size, output_size, label_smoothing, mirror=False, padding_idx=0):
"""
:param hidden_size:
:param output_size:
:param label_smoothing:
:param mirror:
:param padding_idx:
"""
super(NMTLossFunc, self).__init__(output_size, label_smoothing, padding_idx=padding_idx)
self.hidden_size = hidden_size
self.output_size = output_size
self.padding_idx = padding_idx
self.smoothing_value = label_smoothing / output_size
self.confidence = 1.0 - label_smoothing
self.label_smoothing = label_smoothing
self.mirror = mirror
self.extra_modules = nn.ModuleDict()
def set_label_smoothing(self, new_value):
self.label_smoothing = new_value
self.confidence = 1.0 - self.label_smoothing
self.smoothing_value = self.label_smoothing / self.output_size
def add_loss_function(self, loss_function, name):
self.extra_modules[name] = loss_function
def get_loss_function(self, name):
return self.extra_modules[name] if name in self.extra_modules else None
def forward(self, model_outputs, targets, model=None, vocab_mask=None, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
:param vocab_mask:
:param model_outputs: a dictionary containing the predictive output from the model.
time x batch x vocab_size
or time x batch x hidden_size
:param targets: the validate target to compare output with. time x batch
:param model: passing the model in for necessary components
"""
softmaxed = model_outputs['softmaxed']
outputs = model_outputs['hidden']
        # the model outputs logits (stored under the legacy 'logprobs' key), not log-probs
logits = model_outputs['logprobs']
mirror = self.mirror
targets_ = targets.view(-1)
non_pad_mask = torch.nonzero(targets_.ne(self.padding_idx)).squeeze(1)
labels = targets_.index_select(0, non_pad_mask)
logits = logits.view(-1, logits.size(-1)).index_select(0, non_pad_mask)
with torch.no_grad():
# don't need softmax, just take argmax on unnormalized probabilities
preds = torch.argmax(logits, dim=1)
correct = (preds == labels).sum().item()
total = labels.numel()
if mirror:
reverse_outputs = model_outputs['reverse_hidden']
reverse_logits = model_outputs['reverse_logprobs']
# reverse_targets = torch.flip(targets, (0, )) # reverse the targets in time dimension
reverse_targets = model_outputs['reverse_target']
alpha = 1.0
loss, loss_data = self._compute_loss(logits, labels, vocab_mask=vocab_mask, softmaxed=softmaxed)
total_loss = loss
if mirror:
reverse_loss, rev_loss_data = self._compute_loss(reverse_logits, reverse_targets, softmaxed=softmaxed)
            # flip the reverse outputs so that they align with the forward direction
reverse_outputs = torch.flip(reverse_outputs, (0,))
lengths = model_outputs['target_lengths']
mirror_loss = 0
# forward: 1 2 3 4 5 6 7 8
# backward: 9 8 7 6 5 4 3 2 > 2 3 4 5 6 7 8 9
# we want 1 == 3, 2 == 4, 5 == 7 etc because they predict the same output word
fwd_mask = model_outputs['tgt_mask'].new(outputs.size(0), outputs.size(1)).fill_(0)
bwd_mask = model_outputs['tgt_mask'].new(outputs.size(0), outputs.size(1)).fill_(0)
for (b, length) in enumerate(lengths):
L = length - 1
fwd_mask[:L - 1, b].fill_(1)
bwd_mask[1:L, b].fill_(1)
fwd_mask = fwd_mask.view(-1)
fwd_mask = torch.nonzero(fwd_mask).squeeze(1)
fwd_hiddens = outputs.contiguous().view(-1, outputs.size(-1))
fwd_hiddens = fwd_hiddens.index_select(0, fwd_mask)
bwd_mask = bwd_mask.view(-1)
bwd_mask = torch.nonzero(bwd_mask).squeeze(1)
bwd_hiddens = reverse_outputs.contiguous().view(-1, reverse_outputs.size(-1))
bwd_hiddens = bwd_hiddens.index_select(0, bwd_mask)
mirror_loss_2 = F.mse_loss(fwd_hiddens, bwd_hiddens, reduction='sum')
mirror_loss = mirror_loss_2.div(outputs.size(-1))
total_loss = total_loss + reverse_loss + alpha * mirror_loss
rev_loss = reverse_loss
else:
mirror_loss = None
rev_loss = None
rev_loss_data = None
# if we also use reconstruction:
if model_outputs['reconstruct']:
rec_logits = model_outputs['rec_logprobs']
rec_targets = model_outputs['rec_target']
rec_loss, rec_loss_data = self._compute_loss(rec_logits, rec_targets, softmaxed=softmaxed)
total_loss = total_loss + rec_loss
else:
rec_loss, rec_loss_data = None, None
output_dict = {"loss": loss, "data": loss_data,
"rev_loss": rev_loss, "rev_loss_data": rev_loss_data, "mirror_loss": mirror_loss,
"rec_loss": rec_loss, "rec_loss_data": rec_loss_data,
"correct": correct, "total": total}
# return loss, loss_data, None
return output_dict
class MPCLoss(_Loss):
def forward(self, model_outputs, **kwargs):
mpc_rec = model_outputs['mpc'] # T x B x F
original_source = model_outputs['original_source'] # T x B x F
masked_positions = model_outputs['masked_positions'] # T x B
mask = model_outputs['src_mask']
mask = mask.squeeze(1).transpose(0, 1)
# because mask is input.eq(pad) which means the pad positions are 1
# reverse the mask so that the correct positions are 1
flattened_mask = ~mask.view(-1)
# get the non-zero positions and index select
non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
clean_rec = mpc_rec.view(-1, mpc_rec.size(-1)).index_select(0, non_pad_indices)
clean_source = original_source.view(-1, original_source.size(-1)).index_select(0, non_pad_indices)
clean_masked_positions = masked_positions.view(-1).index_select(0, non_pad_indices)
clean_masked_positions = torch.nonzero(clean_masked_positions).squeeze(1)
# print(clean_masked_positions)
#
# # next, choose the masked positions
mpc_rec = clean_rec.index_select(0, clean_masked_positions)
source = clean_source.index_select(0, clean_masked_positions)
loss = F.l1_loss(mpc_rec.float(), source.float(), reduction='sum')
loss_data = loss.item()
output_dict = {"loss": loss, "data": loss_data, "numel": mpc_rec.size(0)}
return output_dict
class ClassifierLoss(CrossEntropyLossBase):
"""
    Sentence-level classification loss computation.
"""
def __init__(self, hidden_size, output_size, label_smoothing=0.0):
"""
:param hidden_size:
:param output_size:
:param label_smoothing:
:param mirror:
:param fast_xentropy:
"""
self.label_smoothing = label_smoothing
super(ClassifierLoss, self).__init__(output_size, label_smoothing)
self.hidden_size = hidden_size
self.output_size = output_size
self.padding_idx = -9999999999 # don't pad
def forward(self, model_outputs, targets, model=None,
granularity="average", **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
:param granularity:
:param model_outputs: a dictionary containing the predictive output from the model.
time x batch x vocab_size
or time x batch x hidden_size
:param targets: the validate target to compare output with. time x batch
:param model: passing the model in for necessary components
"""
softmaxed = model_outputs['softmaxed']
        # the model outputs logits (stored under the legacy 'logprobs' key), not log-probs
logits = model_outputs['logprobs']
# assert targets.size(0) == 1
# targets should be [1 x batch]
t = logits.size(0)
# targets = targets.repeat(t, 1) # --> time x batch
# print(logits.size())
mask = model_outputs['src_mask'] # B x T
mask = mask.squeeze(1).transpose(0, 1).contiguous() # T x B
# because mask is input.eq(pad) which means the pad positions are 1
# reverse the mask so that the correct positions are 1
# flattened_mask = ~mask.view(-1)
# get the non-zero positions and index select
# non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
        if granularity == 'average':
logits.masked_fill_(mask.bool().unsqueeze(-1), 0)
lengths = (1 - mask.long()).sum(dim=0, keepdim=False)
clean_logits = logits.sum(dim=0, keepdim=False).div(lengths.unsqueeze(-1))
clean_targets = targets.squeeze(0) # --> batch
else:
raise NotImplementedError
# clean_targets = targets.view(-1).index_select(0, non_pad_indices)
# clean_logits = logits.view(-1, logits.size(-1)).index_select(0, non_pad_indices)
#
# # loss, loss_data = self._compute_loss(clean_logits, clean_targets, softmaxed=softmaxed)
loss = F.cross_entropy(clean_logits.float(), clean_targets, weight=None,
ignore_index=-100, reduction='sum', label_smoothing=self.label_smoothing)
loss_data = loss.item()
#
        predictions = F.log_softmax(clean_logits.float(), dim=-1).topk(1, dim=1)[1].squeeze(1)
#
n_correct = (clean_targets.eq(predictions.long())).sum()
output_dict = {"loss": loss, "data": loss_data, "numel": clean_targets.numel(),
"n_correct": n_correct}
# return loss, loss_data, None
return output_dict
class CTCLossFunc(_Loss):
"""
    CTC loss computation (currently not implemented).
"""
def __init__(self, output_size, label_smoothing=0.0):
super(CTCLossFunc, self).__init__(output_size)
self.ctc = nn.CTCLoss(output_size - 1, reduction='sum')
def forward(self, model_outputs, targets, model=None, backward=False, normalizer=1, **kwargs):
"""
Args:
model_outputs: a dictionary containing the predictive output from the model.
time x batch x vocab_size
or time x batch x hidden_size
targets: the validate target to compare output with. time x batch
model: passing the model in for necessary components
backward: to control if we should perform backward pass (to free the graph) or not
normalizer: the denominator of the loss before backward
**kwargs(optional): additional info for computing loss.
"""
raise NotImplementedError
# outputs = model_outputs['encoder']
# original_outputs = outputs
# batch_size = outputs.size(1)
# h_size = outputs.size(-1)
#
# source_mask = model_outputs['src_mask']
# target_mask = model_outputs['tgt_mask']
#
# target_length = target_mask.sum(0)
# if source_mask.dim() == 3:
# input_length = (1-source_mask).squeeze(1).sum(1)
# else:
# input_length = (1-source_mask).sum(1)
#
# # remove elements with more targets than input
# comp = torch.lt(target_length,input_length)
# target_length = target_length.index_select(0,comp.nonzero().squeeze())
# input_length = input_length.index_select(0,comp.nonzero().squeeze())
# outputs = outputs.index_select(1,comp.nonzero().squeeze())
# targets = targets.index_select(1,comp.nonzero().squeeze())
#
# # flatten the output
# size = outputs.size()
# outputs = outputs.contiguous().view(-1, outputs.size(-1))
#
# clean_input = outputs
#
# # dists = generator(outputs)
# if model is not None:
# # the 'second' generator is the encoder softmax one
# dists = model.generator[1](clean_input)
# else:
# dists = clean_input
#
# # reshape back to 3D for CTC
# dists = dists.view(size[0], size[1], -1)
#
# loss = self.ctc(dists,targets.transpose(0,1), input_length, target_length)
#
# loss_data = loss.data.item()
#
# # if not numpy.isfinite(loss_data):
# # print("Input:", input_length)
# # print("Target:", target_length)
# # print("Compare:", comp)
# # print("Selected:", comp.nonzero().squeeze().size())
# # loss = torch.zeros_like(loss)
# # loss_data = loss.data.item()
#
# if backward:
# loss.div(normalizer).backward()
#
# output_dict = {"loss": loss, "data": loss_data}
# return output_dict
# return loss,loss_data, None
class NMTAndCTCLossFunc(_Loss):
"""
    Joint NMT cross-entropy and CTC loss computation.
"""
def __init__(self, output_size, label_smoothing=0.0, ctc_weight=0.0):
super(NMTAndCTCLossFunc, self).__init__(output_size)
self.ctc_weight = ctc_weight
self.ce_loss = NMTLossFunc(output_size, label_smoothing)
self.ctc_loss = CTCLossFunc(output_size + 1, label_smoothing)
def forward(self, model_outputs, targets, model=None, backward=False, normalizer=1, **kwargs):
"""
Args:
model_outputs: a dictionary containing the predictive output from the model.
time x batch x vocab_size
or time x batch x hidden_size
targets: the validate target to compare output with. time x batch
model: passing the model in for necessary components
backward: to control if we should perform backward pass (to free the graph) or not
normalizer: the denominator of the loss before backward
**kwargs(optional): additional info for computing loss.
"""
ce_loss = self.ce_loss(model_outputs, targets, model, False, normalizer)
ctc_loss = self.ctc_loss(model_outputs, targets, model, False, normalizer)
loss = self.ctc_weight * ctc_loss['loss'] + (1 - self.ctc_weight) * ce_loss['loss']
loss_data = self.ctc_weight * ctc_loss['data'] + (1 - self.ctc_weight) * ce_loss['data']
if not numpy.isfinite(ctc_loss['data']):
print("CTC_Loss:", ctc_loss['data'])
print("NMT_Loss:", ce_loss['data'])
print("Loss:", loss_data)
exit()
if backward:
loss.div(normalizer).backward()
output_dict = {"loss": loss, "data": loss_data}
return output_dict
def cuda(self):
self.ce_loss = self.ce_loss.cuda()
self.ctc_loss = self.ctc_loss.cuda()
return self
class FusionLoss(CrossEntropyLossBase):
def forward(self, model_outputs, targets, model=None, backward=False, normalizer=1, **kwargs):
"""
Args:
model_outputs: a dictionary containing the predictive output from the model.
time x batch x vocab_size
or time x batch x hidden_size
targets: the validate target to compare output with. time x batch
model: passing the model in for necessary components
backward: to control if we should perform backward pass (to free the graph) or not
normalizer: the denominator of the loss before backward
**kwargs(optional): additional info for computing loss.
"""
# in this implementation, the PRENORM algorithm is used
tm_outputs = model_outputs['tm']['hidden']
lm_outputs = model_outputs['lm']['hidden']
mask = model_outputs['tgt_mask']
# flatten the output
tm_outputs = tm_outputs.contiguous().view(-1, tm_outputs.size(-1))
lm_outputs = lm_outputs.contiguous().view(-1, lm_outputs.size(-1))
targets = targets.view(-1)
if mask is not None:
""" We remove all positions with PAD """
flattened_mask = mask.view(-1)
non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
clean_tm_input = tm_outputs.index_select(0, non_pad_indices)
clean_lm_input = lm_outputs.index_select(0, non_pad_indices)
clean_targets = targets.index_select(0, non_pad_indices)
else:
clean_tm_input = tm_outputs
clean_lm_input = lm_outputs
clean_targets = targets
if model is not None:
# the 'first' generator is the decoder softmax one
# PRENORM algorithm from
# https://arxiv.org/pdf/1809.00125.pdf
# Simple Fusion: Return of the Language Model
tm_logits = model.tm_model.generator[0](clean_tm_input, log_softmax=False)
with torch.no_grad():
log_lm = model.lm_model.generator[0](clean_lm_input, log_softmax=True)
dists = F.log_softmax(tm_logits + log_lm, dim=-1)
# # POSTNORM algorithm
# tm_logits = model.tm_model.generator[0](clean_tm_input, log_softmax=False)
#
# with torch.no_grad():
# lm_logits = model.lm_model.generator[0](clean_lm_input, log_softmax=False)
#
# dists = F.log_softmax(F.softmax(tm_logits, dim=-1) * F.softmax(lm_logits, dim=-1), dim=-1)
else:
raise NotImplementedError
loss, loss_data = self._compute_loss(dists, clean_targets)
if backward:
loss.div(normalizer).backward()
output_dict = {"loss": loss, "data": loss_data}
return output_dict
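# A minimal sketch of the label-smoothed loss computed manually in the
# "slow code" path of CrossEntropyLossBase._compute_loss, checked against
# PyTorch's built-in label_smoothing on a toy batch. PyTorch smooths
# uniformly over all V classes; the class above uses a slightly different
# normaliser (label_smoothing / (V - 2)) to discount pad and <s>.
def _label_smoothing_sketch():
    torch.manual_seed(0)
    V, eps = 7, 0.1
    logits = torch.randn(5, V)                    # 5 tokens, toy vocab
    gtruth = torch.randint(0, V, (5,))
    lprobs = F.log_softmax(logits, dim=-1)
    nll = -lprobs.gather(1, gtruth.unsqueeze(1)).sum()
    smooth = -lprobs.sum()
    manual = (1.0 - eps) * nll + (eps / V) * smooth
    builtin = F.cross_entropy(logits, gtruth, reduction='sum',
                              label_smoothing=eps)
    assert torch.isclose(manual, builtin, atol=1e-4)
    return manual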
| 23,600
| 38.400668
| 114
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/convolution.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
class Conv2dSubsampling(nn.Module):
def __init__(self, input_dim, output_dim, dropout=0.0):
"""
:param input_dim: the log mel feature (normally 40)
:param output_dim: network size (512)
:param dropout: dropout rate
"""
super(Conv2dSubsampling, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
        # first conv is nn.Conv2d(1, output_dim, 3, 2)
        # second conv is nn.Conv2d(output_dim, output_dim, 3, 2)
self.in_conv_weight = nn.Parameter(torch.Tensor(output_dim, 1, 3, 3))
self.in_conv_bias = nn.Parameter(torch.Tensor(output_dim))
self.in_stride = 2
self.out_conv_weight = nn.Parameter(torch.Tensor(output_dim, output_dim, 3, 3))
self.out_conv_bias = nn.Parameter(torch.Tensor(output_dim))
self.out_stride = 2
cnn_feature_size = output_dim * (((input_dim - 1) // 2 - 1) // 2)
self.out_weight = nn.Parameter(torch.Tensor(output_dim, cnn_feature_size))
self.out_bias = nn.Parameter(torch.Tensor(output_dim))
self.dropout = dropout
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.in_conv_weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.out_conv_weight, a=math.sqrt(5))
fan_in, _ = init._calculate_fan_in_and_fan_out(self.in_conv_weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.in_conv_bias, -bound, bound)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.out_conv_weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.out_conv_bias, -bound, bound)
std_ = math.sqrt(2.0 / (self.output_dim + self.output_dim))
nn.init.normal_(self.out_weight, 0.0, std_)
nn.init.constant_(self.out_bias, 0.)
return
def forward(self, input, input_mask):
"""
:param input: [bsz x seq_len x input_size]
:param input_mask: [bsz x seq_len]
:return:
"""
input = input.unsqueeze(1) # [bsz x 1 x seq_len x input_size]
# padding = 0, dilation = 1, groups = 1
input = F.conv2d(input, self.in_conv_weight, self.in_conv_bias, self.in_stride, 0, 1, 1)
input = F.relu(input)
input = F.conv2d(input, self.out_conv_weight, self.out_conv_bias, self.out_stride, 0, 1, 1)
input = F.relu(input)
b, c, t, f = input.size()
input = input.transpose(1, 2).contiguous().view(b, t, c * f)
input = F.linear(input, self.out_weight, self.out_bias)
# input = F.dropout(input, p=self.dropout, training=self.training)
mask = input_mask[:, :-2:2][:, :-2:2]
return input, mask
class ConformerConvBlock(nn.Module):
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
super(ConformerConvBlock, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2*channels, kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1,
padding=(kernel_size - 1) // 2, groups=channels, bias=bias)
self.batch_norm = nn.BatchNorm1d(channels)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias)
self.activation = activation
# self.in_pointwise_weight = nn.Conv1d(channels, 2*channels, kernel_size=1, stride=1, padding=0, bias=False)
# self.in_pointwise_bias = nn.Parameter(torch.Tensor(2 * channels))
#
# self.depthwise_weight = nn.Parameter(torch.Tensor(channels, channels // channels, kernel_size))
# self.depthwise_bias = nn.Parameter(torch.Tensor(channels))
# self.padding = (kernel_size - 1) // 2
# self.groups = channels
#
# self.norm = nn.BatchNorm1d(channels)
# self.out_pointwise_weight = nn.Parameter(torch.Tensor(channels, channels, 1))
# self.out_pointwise_bias = nn.Parameter(torch.Tensor(channels))
#
# self.activation = activation
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.pointwise_conv1.weight, nonlinearity='relu')
nn.init.kaiming_normal_(self.depthwise_conv.weight, nonlinearity='relu')
nn.init.kaiming_normal_(self.pointwise_conv2.weight, nonlinearity='relu')
nn.init.constant_(self.pointwise_conv1.bias, 0)
nn.init.constant_(self.pointwise_conv2.bias, 0)
nn.init.constant_(self.depthwise_conv.bias, 0)
# nn.init.kaiming_uniform_(self.in_pointwise_weight, a=math.sqrt(5))
# nn.init.kaiming_uniform_(self.depthwise_weight, a=math.sqrt(5))
# nn.init.kaiming_uniform_(self.out_pointwise_weight, a=math.sqrt(5))
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.in_pointwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.in_pointwise_bias, -bound, bound)
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.depthwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.depthwise_bias, -bound, bound)
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.out_pointwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.out_pointwise_bias, -bound, bound)
def forward(self, x, pad_mask=None):
"""
:param pad_mask: [seq_len x bsz] indicating which element is correct
(this should be the same with the attention mask (pad=1, unpad=0)
:param x: [seq_len x bsz x hidden_size]
:return:
"""
x = x.transpose(0, 1).transpose(1, 2) # to [bsz x hidden_size x seq_len]
        # pointwise conv needs no masking because it's an elementwise projection
x = self.pointwise_conv1(x)
x = F.glu(x, dim=1)
# if pad_mask is not None:
# pad_mask = pad_mask.transpose(0, 1).transpose(1, 2)
# # print(x.size(), pad_mask.size())
# x = x.masked_fill_(pad_mask, 0)
x = self.depthwise_conv(x)
x = self.activation(self.batch_norm(x))
x = self.pointwise_conv2(x)
# x = F.conv1d(x, self.in_pointwise_weight, self.in_pointwise_bias, 1, 0, 1, 1)
# x = F.glu(x, dim=1)
#
# x = F.conv1d(x, self.depthwise_weight, self.depthwise_bias, 1, self.padding, 1, self.groups)
# x = self.activation(x)
#
# x = F.conv1d(x, self.out_pointwise_weight, self.out_pointwise_bias, 1, 0, 1, 1)
x = x.transpose(1, 2).transpose(0, 1) # back to [seq_len x bsz x hidden_size]
return x
if __name__ == "__main__":
bsz = 160
seq_len = 1000
input_size = 48
output_size = 128
kernel = 31
subsampler = Conv2dSubsampling(input_size, output_size)
subsampler = subsampler.cuda()
conv = ConformerConvBlock(output_size, kernel)
conv = conv.cuda()
input = torch.randn(seq_len, bsz, input_size)
mask = torch.randn(bsz, seq_len)
input = input.cuda()
mask = mask.cuda()
input, mask = subsampler(input.transpose(0, 1), mask)
print(input.size())
print(mask.size())
output = conv(input.transpose(0, 1))
print(output.size())
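# A minimal sketch of the time-axis bookkeeping in Conv2dSubsampling above:
# an unpadded 3x3 / stride-2 conv maps a length L to (L - 3) // 2 + 1,
# which equals (L - 1) // 2 -- the same count the mask slicing
# input_mask[:, :-2:2] produces, so features and mask stay aligned.
def _subsample_length_sketch(L=1000):
    conv_len = (L - 3) // 2 + 1                    # one conv layer, k=3, s=2
    assert conv_len == (L - 1) // 2
    mask = torch.zeros(1, L)
    assert mask[:, :-2:2].size(1) == conv_len      # same rule as the features
    twice = ((L - 1) // 2 - 1) // 2                # after both conv layers
    assert mask[:, :-2:2][:, :-2:2].size(1) == twice
    return twice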
| 7,447
| 35.509804
| 116
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/test_layer_norm.py
|
import unittest
import sys
import os
import numpy as np
import torch
#
# import fast_layer_norm as fln
# from apex.contrib.layer_norm.layer_norm import FastLayerNorm
import fast_layer_norm_cuda as fln
from layer_norm import LayerNorm
class GPUTimer:
def __init__(self, stream):
self.start_ = torch.cuda.Event(enable_timing=True)
self.stop_ = torch.cuda.Event(enable_timing=True)
self.stream_ = stream
def start(self):
self.stream_.record_event(self.start_)
def stop(self):
self.stream_.record_event(self.stop_)
def sync(self):
self.stream_.synchronize()
def millis(self):
return self.start_.elapsed_time(self.stop_)
def size_in_bytes(t):
return torch.numel(t) * t.element_size()
def metrics(y_ref, y, epsilon=1e-6):
y_ref = y_ref.float()
y = y.float()
relerr, mse = (
(y_ref - y).abs().sum() / (y_ref.abs().sum() + epsilon),
(y_ref - y).square().mean(),
)
return relerr.item(), mse.item()
device = torch.device("cuda")
fp32 = torch.float32
fp16 = torch.float16
bf16 = torch.bfloat16
def backward_(dz, x, mu, rs, gamma):
wtype = gamma.dtype
itype = x.dtype
otype = dz.dtype
ctype = mu.dtype
mu = mu.unsqueeze(1)
rs = rs.unsqueeze(1)
hidden_size = gamma.numel()
y = rs * (x.to(ctype) - mu)
dbeta = dz.view(-1, hidden_size).sum(0, dtype=ctype)
dgamma = (dz * y).view(-1, hidden_size).sum(0, dtype=ctype)
dy = dz.view(-1, hidden_size).to(ctype) * gamma.unsqueeze(0).to(ctype)
mdy = dy.mean(1, keepdim=True, dtype=ctype)
mdyy = (dy * y).mean(1, keepdim=True, dtype=ctype)
dx = rs * (dy - mdyy * y - mdy)
return dx.to(itype), dgamma.to(wtype), dbeta.to(wtype)
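# A minimal CPU sketch checking the manual layer-norm backward above
# against autograd, using the same formula dx = rs * (dy - mean(dy*y)*y -
# mean(dy)). The fused fln kernels themselves are exercised in test_ below;
# this only validates the reference math on toy sizes.
def _backward_reference_sketch():
    torch.manual_seed(0)
    x = torch.randn(4, 8, requires_grad=True)
    gamma = torch.randn(8, requires_grad=True)
    beta = torch.randn(8, requires_grad=True)
    eps = 1e-5
    mu = x.mean(1, keepdim=True)
    rs = torch.rsqrt(x.var(1, unbiased=False, keepdim=True) + eps)
    z = gamma * (rs * (x - mu)) + beta
    dz = torch.randn_like(z)
    z.backward(dz)
    dx, dg, db = backward_(dz, x.detach(), mu.detach().flatten(),
                           rs.detach().flatten(), gamma.detach())
    assert torch.allclose(dx, x.grad, atol=1e-4)
    assert torch.allclose(dg, gamma.grad, atol=1e-4)
    assert torch.allclose(db, beta.grad, atol=1e-4)
    return dx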
def benchmark_(S, B, hidden_size, itype, wtype, runs=100):
epsilon = 1e-5
x = torch.randn((S * B, hidden_size), dtype=itype, device=device)
beta = torch.randn(hidden_size, dtype=wtype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device)
dz = torch.randn(x.shape, dtype=wtype, device=device)
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
timer = GPUTimer(stream)
# warmup
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.start()
for r in range(runs):
z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)
timer.stop()
timer.sync()
total_bytes_fwd = sum([size_in_bytes(t) for t in [x, z, gamma, beta, mu, rsigma]])
ms_fwd = timer.millis() / runs
print(
"[FWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_fwd, total_bytes_fwd * 1e-6 / ms_fwd
)
)
timer.start()
for r in range(runs):
dx, dgamma, dbeta, dbp, dgp = fln.ln_bwd(dz, x, mu, rsigma, gamma)
timer.stop()
timer.sync()
total_bytes_bwd = sum(
[
size_in_bytes(t)
for t in [dz, x, mu, rsigma, gamma, dx, dgamma, dbeta, dbp, dbp, dgp, dgp]
]
)
ms_bwd = timer.millis() / runs
print(
"[BWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec".format(
ms_bwd, total_bytes_bwd * 1e-6 / ms_bwd
)
)
def test_(S, B, hidden_size, itype, wtype, ctype=fp32):
seed = 1243
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
otype = wtype
print("========================================================")
print(f"S={S} B={B} Hidden={hidden_size} {itype} {wtype}")
print("--------------------------------------------------------")
x = torch.randn(S * B, hidden_size, dtype=itype, device=device)
gamma = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
beta = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2
epsilon = 1e-5
x.requires_grad = True
gamma.requires_grad = True
beta.requires_grad = True
mu_ref = x.mean(1, dtype=ctype, keepdim=True)
v = torch.square(x - mu_ref).mean(1, dtype=ctype, keepdim=True)
rs_ref = torch.rsqrt(v + epsilon)
y_ref = rs_ref * (x.to(ctype) - mu_ref)
z_ref = (gamma.unsqueeze(0) * (y_ref).to(otype) + beta.unsqueeze(0)).to(otype)
mu_ref = mu_ref.flatten()
rs_ref = rs_ref.flatten()
dz = torch.randn_like(z_ref)
# z_ref.backward(dz)
# dx_ref = x.grad
# dgamma_ref = gamma.grad
# dbeta_ref = beta.grad
dx_ref, dg_ref, db_ref = backward_(dz, x, mu_ref, rs_ref, gamma)
z, mu, rs = fln.ln_fwd(x, gamma, beta, epsilon)
dx, dg, db, dg_part, db_part = fln.ln_bwd(dz, x, mu, rs, gamma)
re_z, mse_z = metrics(z_ref, z)
re_mu, mse_mu = metrics(mu_ref, mu)
re_rs, mse_rs = metrics(rs_ref, rs)
re_dx, mse_dx = metrics(dx_ref, dx)
re_dg, mse_dg = metrics(dg_ref, dg)
re_db, mse_db = metrics(db_ref, db)
print(f" z: relerr={re_z :.4e} mse={mse_z :.4e}")
print(f"mu: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"rs: relerr={re_mu:.4e} mse={mse_mu:.4e}")
print(f"dx: relerr={re_dx:.4e} mse={mse_dx:.4e}")
print(f"dg: relerr={re_dg:.4e} mse={mse_dg:.4e}")
print(f"db: relerr={re_db:.4e} mse={mse_db:.4e}")
def check_err(x, relerr):
tol = 1e-3 if x.dtype == torch.float16 else 5e-6
return relerr < tol
return [
check_err(x, re)
for x, re in zip([z, mu, rs, dx, dg, db], [re_z, re_mu, re_rs, re_dx, re_dg, re_db])
]
class TestFastLayerNorm(unittest.TestCase):
def assertAll(self, l):
if not all(l):
print(l)
for x in l:
self.assertTrue(x)
def test_all_configs(self):
hidden_sizes = [
768,
1024,
1536,
2048,
2304,
3072,
3840,
4096,
5120,
6144,
8192,
10240,
12288,
12800,
15360,
16384,
18432,
20480,
24576,
25600,
30720,
32768,
40960,
49152,
65536,
]
for h in hidden_sizes:
with self.subTest(f"hidden_size={h}"):
self.assertAll(test_(256, 2, h, fp32, fp32))
self.assertAll(test_(256, 2, h, fp16, fp16))
self.assertAll(test_(256, 2, h, fp32, fp16))
# self.assertAll(test_(256, 2, h, bf16, bf16))
# self.assertAll(test_(256, 2, h, fp32, bf16))
def test_run_benchmark(self):
for (S, B, hidden_size, runs) in (
(512, 32, 768, 1000),
(512, 32, 1024, 1000),
(512, 8, 4096, 1000),
(512, 8, 5120, 1000),
(512, 8, 6144, 1000),
(256, 2, 20480, 500),
(256, 2, 25600, 500),
(256, 2, 40960, 250),
(256, 2, 65536, 250),
):
with self.subTest(f"(S, B, hidden_size)=({S}, {B}, {hidden_size})"):
benchmark_(S, B, hidden_size, fp16, fp16, runs)
def test_compat_with_autocast(self):
autocast_dtypes = (
(torch.half, torch.bfloat16) if torch.cuda.is_bf16_supported() else (torch.half,)
)
input_shape = (512, 32, 768)
layer_norm = LayerNorm(input_shape[-1]).cuda()
input = torch.randn(input_shape).cuda()
for dtype in autocast_dtypes:
layer_norm.zero_grad(set_to_none=True)
with self.subTest(f"autocast_dtype={dtype}"):
with torch.cuda.amp.autocast(enabled=True, dtype=dtype):
out = layer_norm(input)
self.assertEqual(dtype, out.dtype)
grad = torch.randn_like(out)
out.backward(grad)
self.assertEqual(torch.float32, layer_norm.weight.grad.dtype)
if __name__ == "__main__":
unittest.main()
| 7,976
| 27.797834
| 93
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/linear.py
|
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
# from onmt.modules.swish import Swish
from onmt.modules.dropout import VariationalDropout
# different linears for the same input
def group_linear(linears, input, bias=False):
weights = [linear.weight for linear in linears]
weight = torch.cat(weights, dim=0)
if bias:
biases = [linear.bias for linear in linears]
bias_ = torch.cat(biases)
else:
bias_ = None
return F.linear(input, weight, bias_)
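# A minimal sketch of what group_linear computes: concatenating the weight
# matrices of several linears over the same input is equivalent to running
# them separately and concatenating the outputs, but it issues one fused
# GEMM instead of several. Toy sizes; all names are illustrative only.
def _group_linear_sketch():
    a, b = nn.Linear(8, 4, bias=False), nn.Linear(8, 6, bias=False)
    x = torch.randn(3, 8)
    fused = group_linear([a, b], x)
    split = torch.cat([a(x), b(x)], dim=-1)
    assert torch.allclose(fused, split, atol=1e-6)
    return fused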
class XavierLinear(nn.Module):
''' Simple Linear layer with xavier init '''
def __init__(self, d_in, d_out, bias=True, nonlinearity='linear'):
super(XavierLinear, self).__init__()
linear = nn.Linear(d_in, d_out, bias=bias)
weight_norm = onmt.constants.weight_norm
self.weight_norm = weight_norm
if weight_norm:
self.linear = WeightNorm(linear, name='weight')
else:
self.linear = linear
# init.xavier_uniform_(self.linear.weight)
#
# if bias:
# self.linear.bias.data.zero_()
def forward(self, x):
return self.linear(x)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.linear.in_features) \
+ ', out_features=' + str(self.linear.out_features) \
+ ', bias=' + str(self.linear.bias is not None) \
+ ', weight_norm=' + str(self.weight_norm) + ')'
Linear = XavierLinear
class MaxOut(nn.Module):
def __init__(self, d, m, k):
super(MaxOut, self).__init__()
self.d_in, self.d_out, self.pool_size = d, m, k
self.lin = Linear(d, m * k)
def forward(self, inputs):
original_size = inputs.size()
inputs = inputs.view(-1, inputs.size(-1))
shape = list(inputs.size())
shape[-1] = self.d_out
shape.append(self.pool_size)
max_dim = len(shape) - 1
out = self.lin(inputs)
m, i = out.view(*shape).max(dim=max_dim)
m = m.view(*original_size[:-1], m.size(-1))
return m
class FeedForwardSwish(nn.Module):
"""Applies position-wise feed forward to inputs
Args:
d_model: dimension of model
d_ff: dimension of feed forward
p: dropout probability
Params:
fc_1: FC layer from d_model to d_ff
fc_2: FC layer from d_ff to d_model
Input Shapes:
input: batch_size x len x d_model or len x batch_size x d_model
Output Shapes:
out: batch_size x len x d_model or len x batch_size x d_model
"""
def __init__(self, d_model, d_ff, p, variational=False):
super(FeedForwardSwish, self).__init__()
self.d_model = d_model
self.d_ff = d_ff
self.fc_1 = XavierLinear(d_model, d_ff)
self.fc_2 = XavierLinear(d_ff, d_model)
        self.swish = torch.nn.SiLU()
if variational:
self.dropout = VariationalDropout(p)
else:
self.dropout = nn.Dropout(p)
def forward(self, input):
out = self.swish(self.fc_1(input))
out = self.dropout(out)
out = self.fc_2(out)
return out
class FeedForward(nn.Module):
"""Applies position-wise feed forward to inputs
Args:
d_model: dimension of model
d_ff: dimension of feed forward
p: dropout probability
Params:
fc_1: FC layer from d_model to d_ff
fc_2: FC layer from d_ff to d_model
Input Shapes:
input: batch_size x len x d_model or len x batch_size x d_model
Output Shapes:
out: batch_size x len x d_model or len x batch_size x d_model
"""
def __init__(self, d_model, d_ff, p, variational=False):
super(FeedForward, self).__init__()
self.d_model = d_model
self.d_ff = d_ff
self.fc_1 = Linear(d_model, d_ff, nonlinearity="relu")
self.fc_2 = Linear(d_ff, d_model)
if variational:
self.dropout = VariationalDropout(p)
else:
self.dropout = nn.Dropout(p)
def forward(self, input):
out = F.relu(self.fc_1(input), inplace=True)
out = self.dropout(out)
out = self.fc_2(out)
return out
# class ChunkFeedForward(nn.Module):
# """Applies position-wise feed forward to CHUNKs of inputs
#
# Args:
# d_model: dimension of model
# d_ff: dimension of feed forward
# p: dropout probability
#
# Params:
# fc_1: FC layer from d_model to d_ff
# fc_2: FC layer from d_ff to d_model
#
# Input Shapes:
# input: batch_size x len x d_model or len x batch_size x d_model
#
# Output Shapes:
# out: batch_size x len x d_model or len x batch_size x d_model
# """
# def __init__(self, d_model, d_ff, p, **kwargs):
# super(ChunkFeedForward, self).__init__()
# self.d_model = d_model
# self.d_ff = d_ff
# self.fc_1 = Linear(d_model, d_ff, nonlinearity="relu")
# self.fc_2 = Linear(d_ff, d_model)
#
# i
#
# def forward(self, input):
#
# out = F.relu(self.fc_1(input), inplace=True)
# out = self.dropout(out)
# out = self.fc_2(out)
# return out
| 5,439
| 26.897436
| 77
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/utilities.py
|
import torch
import torch.nn as nn
class AttributeEmbeddings(nn.Module):
def __init__(self, atb_dicts, atb_size):
self.n_attributes = len(atb_dicts)
self.atb_sizes = atb_size
super().__init__()
self.atb_embeddings = nn.ModuleDict()
for i in atb_dicts:
self.atb_embeddings[str(i)] = nn.Embedding(atb_dicts[i].size(), atb_size)
def forward(self, atbs):
"""
Input: atbs is a dictionary of features
"""
embeddings = []
for i in atbs:
embedding = self.atb_embeddings[str(i)](atbs[i])
embeddings.append(embedding)
# Concatenation of the features
embedding = torch.cat(embeddings, dim=-1)
return embedding
def size(self):
return len(self.atb_embeddings)
| 821
| 20.631579
| 85
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/dropout.py
|
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import onmt
class VariationalDropout(torch.nn.Module):
def __init__(self, p=0.5, batch_first=False, inplace=False):
super().__init__()
self.p = p
self.batch_first = batch_first
self.inplace = inplace
def forward(self, x):
if not self.training or not self.p:
return x
if self.batch_first:
m = x.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.p)
else:
m = x.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.p)
m.div_(1 - self.p)
if self.inplace:
x.mul_(m)
return x
else:
return x * m
def variational_dropout(x, p=0.5, training=True, inplace=False, batch_first=False):
if not training or p <= 0:
return x
if batch_first:
m = x.new(x.size(0), 1, x.size(2)).bernoulli_(1 - p)
else:
m = x.new(1, x.size(1), x.size(2)).bernoulli_(1 - p)
m.div_(1 - p)
if inplace:
x.mul_(m)
return x
else:
return x * m
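# A minimal sketch of what makes this dropout "variational": a single
# Bernoulli mask is drawn per sequence (one time step worth of shape) and
# reused at every position, instead of being sampled independently per
# time step. Toy sizes only.
def _variational_mask_sketch():
    x = torch.ones(5, 2, 4)                        # time x batch x hidden
    y = variational_dropout(x, p=0.5, training=True)
    assert torch.equal(y[0], y[-1])                # same mask at every step
    return y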
def embedded_dropout(embed, words, dropout=0.1, scale=None):
if dropout:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(
embed.weight) / (1 - dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
# X = embed._backend.Embedding.apply(words, masked_embed_weight,
# padding_idx, embed.max_norm, embed.norm_type,
# embed.scale_grad_by_freq, embed.sparse
# )
x = F.embedding(
words, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
return x
def switchout(words, vocab_size, tau=1.0, transpose=False, offset=0):
"""
:param offset: number of initial tokens to be left "untouched"
:param transpose: if the tensor has initial size of l x b
:param words: torch.Tensor(b x l)
:param vocab_size: vocabulary size
:param tau: temperature control
:return:
sampled_words torch.LongTensor(b x l)
"""
if transpose:
words = words.t()
if offset > 0:
offset_words = words[:, :offset]
words = words[:, offset:]
mask = torch.eq(words, onmt.constants.BOS) | \
torch.eq(words, onmt.constants.EOS) | torch.eq(words, onmt.constants.PAD)
lengths = (1 - mask.byte()).float().sum(dim=1)
batch_size, n_steps = words.size()
# first, sample the number of words to corrupt for each sent in batch
logits = torch.arange(n_steps).type_as(words).float() # size l
logits = logits.mul_(-1).unsqueeze(0).expand_as(words).contiguous().masked_fill_(mask, -float("inf"))
probs = torch.nn.functional.log_softmax(logits.mul_(tau), dim=1)
probs = torch.exp(probs)
num_words = torch.distributions.Categorical(probs).sample().float()
# second, sample the corrupted positions
corrupt_pos = num_words.div(lengths)
corrupt_pos = corrupt_pos.unsqueeze(1).expand_as(words).contiguous()
corrupt_pos.masked_fill_(mask, 0)
corrupt_pos = torch.bernoulli(corrupt_pos, out=corrupt_pos).byte()
total_words = int(corrupt_pos.sum())
# sample the corrupted values, which will be added to sents
corrupt_val = torch.LongTensor(total_words).type_as(words)
corrupt_val = corrupt_val.random_(1, vocab_size)
corrupts = words.clone().zero_()
corrupts = corrupts.masked_scatter_(corrupt_pos.type_as(mask), corrupt_val)
# to add the corruption and then take the remainder w.r.t the vocab size
sampled_words = words.add(corrupts).remainder_(vocab_size)
if offset > 0:
sampled_words = torch.cat([offset_words, sampled_words], dim=1)
if transpose:
sampled_words = sampled_words.t()
return sampled_words
class ReLUDropout(torch.nn.Dropout):
def __init__(self, p=0.5, variational=False, batch_first=False, inplace=False):
super().__init__(p, inplace=True)
self.variational = variational
self.batch_first = batch_first
def forward(self, input):
return relu_dropout(input, p=self.p, training=self.training,
variational=self.variational, batch_first=self.batch_first)
def relu_dropout(x, p=0, training=False, variational=False, batch_first=False):
if not training or p == 0:
return x.clamp_(min=0)
p1m = 1 - p
if variational:
if batch_first:
mask = torch.rand_like(x[:, 0, :]) > p1m
mask = mask.unsqueeze(1).repeat(1, x.size(1), 1)
else:
mask = torch.rand_like(x[0]) > p1m
mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1 )
else:
mask = torch.rand_like(x) > p1m
mask |= (x < 0)
return x.masked_fill_(mask, 0).div_(p1m)
| 5,102
| 29.375
| 108
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/sinusoidal_positional_encoding.py
|
import torch.nn as nn
import torch
import math
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz: integer to repeat
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in the Attention Is All You Need paper, sin comes first, then cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq.type_as(pos_seq))
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
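# A minimal usage sketch for the class above, assuming a toy dimension of 8
# and relative positions that may be negative; the output concatenates the
# sin and cos halves along the last dimension, as in forward().
def _sinusoidal_sketch():
    emb = SinusoidalPositionalEmbedding(8)
    pos_seq = torch.arange(-2.0, 3.0)      # relative positions -2 .. 2
    pos_emb = emb(pos_seq, bsz=4)          # -> len x bsz x demb
    assert pos_emb.shape == (5, 4, 8)
    return pos_emb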
class FastSinusoidalPositionalEncoding(nn.Module):
"""Adds positional embeddings to standard word embeddings
This matches the original TensorFlow implementation at
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py.
Args:
d_model: dimension of model
p: dropout probability
len_max: max seq length for pre-calculated positional embeddings
Inputs Shapes:
word_emb: batch_size x len_seq x d_model
Outputs Shapes:
out: batch_size x len_seq x d_model
"""
def __init__(self, d_model, p=0, len_max=1024):
        # save a fixed positional embedding matrix up to len_max,
        # so that it does not need to be recreated every time
super(FastSinusoidalPositionalEncoding, self).__init__()
self.len_max = len_max
self.d_model = d_model
self.data_type = None
self.renew(len_max)
self.p = p
def renew(self, new_max_len):
        # delete the old buffer to avoid PyTorch's error when registering the new one
cuda = False
if hasattr(self, 'pos_emb'):
cuda = self.pos_emb.is_cuda
# self.data_type = torch.type(self.pos_emb)
del self.pos_emb
position = torch.arange(0, new_max_len).float()
num_timescales = self.d_model // 2
log_timescale_increment = math.log(10000) / (num_timescales - 1)
inv_timescales = torch.exp(torch.arange(0, num_timescales).float() * -log_timescale_increment)
scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)
pos_emb = torch.cat((torch.sin(scaled_time), torch.cos(scaled_time)), 1)
if cuda:
pos_emb = pos_emb.cuda()
if self.data_type is not None:
pos_emb.type(self.data_type)
# wrap in a buffer so that model can be moved to GPU
self.register_buffer('pos_emb', pos_emb)
# self.data_type = self.pos_emb.type()
self.len_max = new_max_len
def forward(self, word_emb, t=None):
"""
:param word_emb: Tensor [BxTxH] (batch first)
:param t: integer
:return:
"""
len_seq = t if t else word_emb.size(1)
self.data_type = word_emb.type()
if len_seq > self.len_max:
self.renew(len_seq)
if word_emb.size(1) == len_seq:
time_ = self.pos_emb[:len_seq, :].type_as(word_emb)
out = word_emb + time_
else:
# out = word_emb + Variable(self.pos_emb[:len_seq, :][-1, :], requires_grad=False)
time_emb = self.pos_emb[len_seq - 1, :] # 1 x dim
# out should have size bs x 1 x dim
out = word_emb + time_emb.detach().unsqueeze(0).type_as(word_emb)
# repeat(word_emb.size(0), 1, 1).type_as(word_emb)
return out
def get_positional_embeddings(self, word_emb, t=None):
len_seq = t if t else word_emb.size(1)
self.data_type = word_emb.type()
if len_seq > self.len_max:
self.renew(len_seq)
if word_emb.size(1) == len_seq:
time_emb = self.pos_emb[:len_seq, :].type_as(word_emb)
else:
time_emb = self.pos_emb[len_seq - 1, :].unsqueeze(0).type_as(word_emb)
return time_emb
| 4,400
| 33.116279
| 102
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/weight_control_lstm.py
|
# This is the
import torch
import torch.nn as nn
from torch.nn import Parameter
from functools import wraps
import math
class WeightDrop(torch.nn.Module):
def __init__(self, module, weights, dropout=0,):
"""
:param module: a LSTM module
:param weights:
:param dropout:
:param n_languages:
:param rank:
"""
super(WeightDrop, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self._setup()
def trails_in_the_sky(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
# It must be a function rather than a lambda as otherwise pickling explodes
# We can't write boring code though, so ... TRAILS IN THE SKY ftw!!
# (╯°□°)╯︵ ┻━┻
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self.trails_in_the_sky
print(self.weights)
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
# del self.module._parameters[name_w] # don't delete so we can load :))
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
setattr(self.module, name_w, Parameter(w))
def forward(self, *args, **kwargs):
self._setweights()
return self.module.forward(*args, **kwargs)
class WeightFactoredLSTM(torch.nn.Module):
def __init__(self, module, dropout=0, n_languages=1, rank=1, multiplicative=False, activation='none'):
"""
:param module: a LSTM module
:param weights:
:param dropout:
:param n_languages:
:param rank:
"""
super(WeightFactoredLSTM, self).__init__()
self.module = module
self.weights = None
self.dropout = dropout
self.n_languages = n_languages
self.rank = rank
self.multiplicative = multiplicative
self.activation = activation
self._setup()
def trails_in_the_sky(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
# It must be a function rather than a lambda as otherwise pickling explodes
# We can't write boring code though, so ... TRAILS IN THE SKY ftw!!
# (╯°□°)╯︵ ┻━┻
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self.trails_in_the_sky
self.weights = list()
for l in range(self.module.num_layers):
self.weights.append("weight_ih_l%d" % l)
self.weights.append("weight_hh_l%d" % l)
else:
# this code only supports nn.LSTM
raise NotImplementedError
# In this part: we need to look at two things:
# First, __setattr__ of a module is overwritten so that the parameter is registered in module._parameter
# So we need to delete
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
# for each parameter we need to add two auxiliary weights: s and r
aux_s = Parameter(torch.Tensor(self.n_languages, self.rank, w.data.size(0)))
aux_r = Parameter(torch.Tensor(self.n_languages, self.rank, w.data.size(1)))
# initialize these weights:
nn.init.normal_(aux_s, 0.0, math.sqrt(0.02))
nn.init.normal_(aux_r, 0.0, math.sqrt(0.02))
setattr(self, name_w + "_s", aux_s)
setattr(self, name_w + "_r", aux_r)
if self.multiplicative:
aux_ms = Parameter(torch.Tensor(self.n_languages, 1, w.data.size(0)))
aux_mr = Parameter(torch.Tensor(self.n_languages, 1, w.data.size(1)))
# initialize these weights:
if self.activation == 'sigmoid':
raise NotImplementedError
else:
nn.init.constant_(aux_ms, 1.0)
nn.init.constant_(aux_mr, 1.0)
setattr(self, name_w + "_ms", aux_ms)
setattr(self, name_w + "_mr", aux_mr)
def _setweights(self, indices):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
aux_s = getattr(self, name_w + "_s")
aux_r = getattr(self, name_w + "_r")
s_vector = torch.index_select(aux_s, 0, indices).squeeze(0)
r_vector = torch.index_select(aux_r, 0, indices).squeeze(0)
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
if self.multiplicative:
aux_ms = getattr(self, name_w + "_ms")
aux_mr = getattr(self, name_w + "_mr")
ms_vector = torch.index_select(aux_ms, 0, indices).squeeze(0)
mr_vector = torch.index_select(aux_mr, 0, indices).squeeze(0)
scale = torch.bmm(ms_vector.unsqueeze(-1), mr_vector.unsqueeze(1)).sum(dim=0)
if self.activation == 'sigmoid':
scale = torch.sigmoid(scale)
elif self.activation == 'tanh':
scale = torch.tanh(scale)
w = w * scale
w = w + torch.bmm(s_vector.unsqueeze(-1), r_vector.unsqueeze(1)).sum(dim=0)
setattr(self.module, name_w, w)
def forward(self, *args, indices=None):
self._setweights(indices)
return self.module.forward(*args)
if __name__ == '__main__':
import torch
from weight_drop_lstm import WeightDrop
# Input is (seq, batch, input)
x = torch.randn(2, 1, 10).cuda()
h0 = None
print('Testing WeightDrop')
print('=-=-=-=-=-=-=-=-=-=')
print('Testing WeightDrop with Linear')
lin = WeightDrop(torch.nn.Linear(10, 10), ['weight'], dropout=0.9)
lin.cuda()
run1 = [x.sum() for x in lin(x).data]
run2 = [x.sum() for x in lin(x).data]
print('All items should be different')
print('Run 1:', run1)
print('Run 2:', run2)
assert run1[0] != run2[0]
assert run1[1] != run2[1]
print('---')
###
print('Testing WeightDrop with LSTM')
wdrnn = WeightDrop(torch.nn.LSTM(10, 10), ['weight_hh_l0'], dropout=0.9)
wdrnn.cuda()
run1 = [x.sum() for x in wdrnn(x, h0)[0].data]
run2 = [x.sum() for x in wdrnn(x, h0)[0].data]
print('First timesteps should be equal, all others should differ')
print('Run 1:', run1)
print('Run 2:', run2)
# First time step, not influenced by hidden to hidden weights, should be equal
assert run1[0] == run2[0]
# Second step should not
assert run1[1] != run2[1]
print('---')
| 7,439
| 34.769231
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/performer.py
|
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
# helpers
def exists(val):
return val is not None
def empty(tensor):
return tensor.numel() == 0
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def cast_tuple(val):
return (val,) if not isinstance(val, tuple) else val
def get_module_device(module):
return next(module.parameters()).device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
b, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
    ratio = (projection_matrix.shape[0] ** -0.5)  # = 1 / sqrt(nb_features)
# projection = repeat(projection_matrix, 'j d -> b j d', b=b)
# projection = projection_matrix[None, None, :, :].expand(b, h, projection_matrix.size(0), projection_matrix.size(1))
# .repeat(b, h, 1, 1)
projection = projection_matrix
projection = projection.type_as(data)
# data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_dash = torch.matmul((data_normalizer * data), projection.transpose(0, 1))
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.max(data_dash)) + eps)
# data_dash = ratio * (torch.exp(data_dash - diag_data) + eps)
return data_dash.type_as(data)
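# A hedged sanity check (not part of the original file): the FAVOR+ feature
# map above should produce strictly positive features of size nb_features,
# using the orthogonal projection constructed further below in this file.
def _softmax_kernel_demo():
    d_head, nb_features = 16, 32
    proj = gaussian_orthogonal_random_matrix(nb_features, d_head)
    q = torch.randn(3, 10, d_head)
    q_prime = softmax_kernel(q, projection_matrix=proj, is_query=True)
    assert q_prime.shape == (3, 10, nb_features) and (q_prime > 0).all()
    return q_prime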
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(),
kernel_epsilon=0.001, normalize_data=True, device=None):
b, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b j d', b=b)
# projection = projection_matrix[None, None, :, :].repeat(b, h, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
def orthogonal_matrix_chunk(cols, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.qr(unstructured_block.cpu(), some=True)
q, r = map(lambda t: t.to(device), (q, r))
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
def apply_scaling(scale, x):
return torch.einsum("...n,...nd->...nd", scale, x)
def fast_attention(query, key, value):
"""
:param query: bsz * n_heads x seq_len x nb_features
:param key: bsz * n_heads x seq_len x nb_features
:param value: bsz * n_heads x seq_len x head_dim
:return:
"""
buffer = torch.cat([key.transpose(1, 2).bmm(value), key.sum(1).unsqueeze(-1)], dim=-1)
buffer = query.bmm(buffer)
return apply_scaling(1 / buffer[:, :, -1], buffer[:, :, :-1])
# non-causal linear attention
def linear_attention(q, k, v):
# print("[linear attention]", q.size(), k.size(), v.size())
# bsz, heads, len_q, nb = q.size(0), q.size(1), q.size(2), q.size(3)
# head_dim = v.size(-1)
# k should be the same as q
# k = k.view(bsz * heads, len_q, nb)
# D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) # b h n d * b h d -> b h n
# D_inv = 1. / torch.bmm(q.view, k) #
# b, h, d = q.size(0), q.size(1), q.size(-1)
# q = q.view(bsz * heads, len_q, nb)
# D_inv = 1. / torch.bmm(q, k_cumsum)
# print("[linear attention dinv]", D_inv.size())
# v = v.view(bsz * heads, len_q, head_dim)
# print("[linear attention v]", v.size(), k.size())
# context = torch.bmm(k.transpose(1, 2).contiguous(), v) # BH * nb * len_q x BH * len_q * head_dim
# print("[linear attention context]", context.size())
# -> BH * nb * head_dim
# out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv) # b h d e * b h n d * b h n -> b h n e
# out = torch.bmm(q, context) * D_inv
# out = out.view(bsz, heads, len_q, head_dim)
# print("[linear attention out]", out.size())
# k_cumsum = k.sum(dim=-2) # B n d -> B d
# D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) # b h n d * b h d -> b h n
# context = torch.einsum('...nd,...ne->...de', k, v) # b h n d * b h n e -> b h d e ( e = d = head_dim )
# out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
# return out
query, key, value = q, k, v
buffer = torch.cat([key.transpose(1, 2).bmm(value), key.sum(1).unsqueeze(-1)], dim=-1)
buffer = query.bmm(buffer)
return apply_scaling(1 / buffer[:, :, -1], buffer[:, :, :-1])
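# A hedged equivalence check (not part of the original file): linear_attention
# computes Q'(K'^T V) scaled by 1 / (Q' K'^T 1), which for positive features
# equals row-normalized attention (Q'K'^T / rowsum) applied to V.
def _linear_attention_demo():
    bh, n, nb, d = 2, 6, 4, 3
    q = torch.rand(bh, n, nb) + 0.1  # positive features
    k = torch.rand(bh, n, nb) + 0.1
    v = torch.randn(bh, n, d)
    out = linear_attention(q, k, v)
    attn = q.bmm(k.transpose(1, 2))  # explicit O(n^2) reference
    ref = (attn / attn.sum(dim=-1, keepdim=True)).bmm(v)
    assert torch.allclose(out, ref, atol=1e-5)
    return out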
def apply_regular_feature_map(x, orf, epsilon=1e-6):
m, d_k = orf.shape
proj_x = x @ orf.T / math.pow(d_k, 1 / 4)
norm = (x ** 2).sum(dim=-1, keepdim=True) / (2 * math.sqrt(d_k))
return (torch.exp(proj_x - norm) + epsilon) / math.sqrt(m)
def apply_hyperbolic_feature_map(x, orf, epsilon=1e-6):
m, d_k = orf.shape
proj_x = x @ orf.T / math.pow(d_k, 1 / 4)
proj_x = torch.cat([proj_x, -proj_x], dim=-1)
norm = (x ** 2).sum(dim=-1, keepdim=True) / (2 * math.sqrt(d_k))
return (torch.exp(proj_x - norm) + epsilon) / math.sqrt(2 * m)
def create_orf(d_k, m):
blocks = torch.randn(math.ceil(m / d_k), d_k, d_k)
blocks, _ = torch.qr(blocks)
scale = torch.randn(m, d_k).norm(dim=1)
return apply_scaling(scale, blocks.reshape(-1, d_k)[:m])
# TODO: maybe name this class FAVOR+ or FAVORSelfAttention
class Performer(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0, generalized_attention=False,
kernel_fn=nn.ReLU(), no_projection=False):
super().__init__()
        # nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
        # note: the paper's default above is overridden with a hard-coded value here
        nb_features = 32
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows=self.nb_features,
nb_columns=dim_heads, scaling=ortho_scaling)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.causal = False
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
len_q, len_k = q.size(-2), k.size(-2)
bh = q.size(0) # , q.size(1)
if self.no_projection:
q = q.softmax(dim=-1)
k = torch.exp(k) if self.causal else k.softmax(dim=-2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.kernel_fn,
projection_matrix=self.projection_matrix, device=device)
q, k = map(create_kernel, (q, k))
else:
# softmax approximation - default option
create_kernel = partial(softmax_kernel, projection_matrix=self.projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
out = linear_attention(q, k, v)
# print("[performer out]", out.size())
# attn_weights = out.new(b, h, len_q, len_k).zero_()
return out, None
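# A small usage sketch (not part of the original file); shapes follow the
# fast_attention docstring above: [bsz * n_heads x seq_len x dim]. Note that
# nb_features is hard-coded to 32 in __init__.
def _performer_demo():
    dim_heads = 16
    attn = Performer(dim_heads)
    q, k, v = (torch.randn(4, 10, dim_heads) for _ in range(3))
    out, _ = attn(q, k, v)
    assert out.shape == v.shape
    return out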
# a module for keeping track of when to update the projections
class ProjectionUpdater(nn.Module):
def __init__(self, instance, feature_redraw_interval):
super().__init__()
self.instance = instance
self.feature_redraw_interval = feature_redraw_interval
# self.register_buffer('calls_since_last_redraw', torch.tensor(0))
self.calls_since_last_redraw = 0
def fix_projections_(self):
self.feature_redraw_interval = None
def redraw_projections(self):
model = self.instance
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(model)
print("draw new random features ...")
fast_attentions = find_modules(model, Performer)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
# self.calls_since_last_redraw.zero_()
self.calls_since_last_redraw = 0
return
self.calls_since_last_redraw += 1
def forward(self, x):
        # `NotImplemented` is not an exception; raise NotImplementedError instead
        raise NotImplementedError
| 10,788
| 33.691318
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/relative_attention.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.constants import double_precision
def _rel_shift(x, zero_triu=False):
# zero_pad size: [q_len, 1, bsz, n_head]
zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
x = x_padded[1:].view_as(x)
# fills the 'unnecessary' parts with zeros
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
def _rel_future_shift(x):
"""
x input dimension: [qlen, klen, bsz, nhead]
"""
qlen, klen = x.size(0), x.size(1)
# adding the device here is MUCH faster than using device after expanding
rel = torch.arange(klen - qlen, -qlen, -1, device=x.device).unsqueeze(0)
shift = torch.arange(0, qlen, 1, device=x.device).unsqueeze(1)
indices = klen - 1 - torch.abs(rel + shift)
# expanding to the batch size and head dimensions
for i in range(x.dim() - 2):
indices = indices.unsqueeze(-1)
indices = indices.expand_as(x)
output_ = torch.gather(x, 1, indices)
return output_
# Relative Multihead Attention
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
# self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
# self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
# self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m, :m] = torch.triu(mask[:m, :m])
mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen - 1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:, :, None, None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
# efficient computation of B and D term using shift
# x dimension: [q_len, k_len, bsz, n_head]
def _rel_shift(self, x, zero_triu=False):
# zero_pad size: [q_len, 1, bsz, n_head]
zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
# x_padded:
x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
# Relative Partially Learnable (from Transformer XL)
class RelPartialLearnableMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropatt=0, asynchronous=False,
tgt_len=None, ext_len=None, mem_len=None, shared_pos_across_heads=False):
super(RelPartialLearnableMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
# self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.shared_pos_across_heads = shared_pos_across_heads
self.asynchronous = asynchronous
if not shared_pos_across_heads:
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
# Parameters for the position biases
# Each head has a different bias
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
def compute_attention(self, r, w_head_q, w_head_k, w_head_v, attn_mask=None, debug=False):
        r_w_bias = self.r_w_bias  # was mistakenly aliased to r_r_bias, leaving r_w_bias unused
        r_r_bias = self.r_r_bias
qlen, rlen, bsz = w_head_q.size(0), r.size(0), w_head_q.size(1)
rsize = r.size(-1)
klen = w_head_k.size(0)
assert rlen >= klen # can allocate more relative positions than klen
# mapping d-model to d-head
if rsize == self.d_model:
r_head_k = self.r_net(r)
elif rsize == self.d_head:
# shared R for each head
r_head_k = r.unsqueeze(-2).expand(rlen, 1, self.n_head, self.d_head)
else:
raise NotImplementedError
w_head_q = w_head_q.contiguous().view(qlen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_k = w_head_k.contiguous().view(klen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_v = w_head_v.contiguous().view(klen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_q = w_head_q.view(bsz, self.n_head, qlen, self.d_head)
w_head_k = w_head_k.view(bsz, self.n_head, klen, self.d_head)
# r_head_k is the projected positions (not depending on the tensors)
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head).transpose(0, 1) # n_head xrlen x d_head
# compute attention score
# r_w_bias is [n_head, d_head]
        rw_head_q = w_head_q + r_w_bias.unsqueeze(1)  # bsz x n_head x qlen x d_head
AC = torch.matmul(rw_head_q, w_head_k.transpose(2, 3))
rr_head_q = w_head_q + r_r_bias.unsqueeze(1)
# [bsz, n_head, q_len, d] > [bsz, n_head, q_len, k_len]
BD = torch.matmul(rr_head_q, r_head_k.transpose(1, 2))
# [bsz, n_head, q_len, k_len] to [q_len, k_len, bsz, n_head]
BD = BD.transpose(0, 2).transpose(1, 3)
# relative_future_shift gives us 5 4 3 2 1 0 1 2 3 4 5 ... relatives for position at 0
# BD = _rel_future_shift(BD)
# Rel shift uses simple view which is faster than torch.gather
# the input to rel_shift should have size: [qlen, klen, bsz, n_head]
BD = _rel_shift(BD)
BD = BD.transpose(0, 2).transpose(1, 3)
# output size of BD: [bsz, n_head, q_len, k_len]
# take the first klen results from BD (the rest might not be necessary)
BD = BD[:, :, :, :klen]
# [bsz x n_head x qlen x klen]
attn_score = AC + BD
attn_score.mul_(self.scale)
# [qlen x klen x bsz x n_head]
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
# compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None, :, :, None], -float('inf')).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:, :, :, None], -float('inf')).type_as(attn_score)
# [bsz x n_head x qlen x klen] again
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
# [bsz x n_head x qlen x klen] again
_dtype = torch.float64 if double_precision else torch.float32
attn_prob = F.softmax(attn_score, dim=-1, dtype=_dtype).type_as(attn_score)
# nan may happen ... because of the first positions (aligned right) will have nothing to attend to
if debug:
nan_mask = torch.isnan(attn_prob)
attn_prob = attn_prob.masked_fill(nan_mask, 0).type_as(attn_score)
coverage = attn_prob
attn_prob = self.dropatt(attn_prob)
attn_prob = attn_prob.view(bsz * self.n_head, qlen, klen)
# compute attention vector
attn_vec = torch.bmm(attn_prob, w_head_v)
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.transpose(0, 1).contiguous().view(qlen, bsz, self.d_model)
# linear projection
attn_out = self.o_net(attn_vec)
output = attn_out
return output, coverage
def forward(self, w, r, attn_mask=None, debug=False, mems=None,
incremental=False, incremental_cache=None):
"""
:param mems:
:param attn_mask:
:param incremental_cache:
:param incremental:
:param debug:
:param w: input embeddings (E) T x B x H
:param r: relative encodings (R)
:param attn_mask:
:return:
"""
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
w_heads = self.qkv_net(torch.cat([mems, w], 0))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
if incremental:
if 'k' in incremental_cache and 'v' in incremental_cache:
with torch.no_grad():
w_head_k = torch.cat([incremental_cache['k'], w_head_k], dim=0) # time first
incremental_cache['k'] = w_head_k.detach()
w_head_v = torch.cat([incremental_cache['v'], w_head_v], dim=0) # time first
incremental_cache['v'] = w_head_v.detach()
else:
incremental_cache['k'] = w_head_k.detach()
incremental_cache['v'] = w_head_v.detach()
# print(w_head_q.size(), w_head_k.size(), w_head_v.size())
output, coverage = self.compute_attention(r, w_head_q, w_head_k, w_head_v, attn_mask=attn_mask, debug=debug)
return output, coverage, incremental_cache
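# A hedged usage sketch (not part of the original file). The position biases
# are declared as uninitialized Parameters above, so they are initialized here
# before running; `r` holds the relative position encodings, time-first.
def _rel_partial_attn_demo():
    n_head, d_model, d_head = 4, 32, 8
    attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head)
    nn.init.normal_(attn.r_w_bias, 0.0, 0.02)
    nn.init.normal_(attn.r_r_bias, 0.0, 0.02)
    qlen, bsz = 5, 2
    w = torch.randn(qlen, bsz, d_model)  # input states, T x B x H
    r = torch.randn(qlen, 1, d_model)    # relative position encodings
    out, coverage, _ = attn(w, r, attn_mask=None)
    assert out.shape == (qlen, bsz, d_model)
    return out, coverage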
class LearnableRelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropatt=0, max_len=64,
tgt_len=None, ext_len=None, mem_len=None):
super(LearnableRelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.max_len = max_len
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.position_embedding = nn.Embedding(2 * self.max_len + 1, d_head)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.scale = 1 / (d_head ** 0.5)
def generate_relative_positions(self, qlen, klen, device, caching=False):
if caching:
distance_mat = torch.arange(-klen+1, 1, 1).unsqueeze(0) # 1 x T
distance_mat = distance_mat.to(device)
else:
# assert qlen == klen
range_vec = torch.arange(klen) # klen
range_vec = range_vec.to(device)
range_mat = range_vec.unsqueeze(-1).expand(-1, klen).transpose(0, 1)
distance_mat = range_mat - range_mat.transpose(0, 1) # T x T
distance_mat_clipped = torch.clamp(distance_mat, min=-self.max_len, max=self.max_len)
relative_distance = distance_mat_clipped + self.max_len
return relative_distance
def forward(self, w, attn_mask=None, debug=False, mems=None,
incremental=False, incremental_cache=None):
"""
:param mems:
:param attn_mask:
:param incremental_cache:
:param incremental:
:param debug:
:param w: input embeddings (E) T x B x H
:param r: relative encodings (R)
:param attn_mask:
:return:
"""
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
w_heads = self.qkv_net(torch.cat([mems, w], 0))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        w_head_q = w_head_q[-qlen:]  # keep only the last qlen queries; memory positions contribute keys/values only
if incremental:
if 'k' in incremental_cache and 'v' in incremental_cache:
with torch.no_grad():
w_head_k = torch.cat([incremental_cache['k'], w_head_k], dim=0) # time first
incremental_cache['k'] = w_head_k.detach()
w_head_v = torch.cat([incremental_cache['v'], w_head_v], dim=0) # time first
incremental_cache['v'] = w_head_v.detach()
else:
incremental_cache['k'] = w_head_k.detach()
incremental_cache['v'] = w_head_v.detach()
q_len = w_head_q.size(0)
k_len = w_head_k.size(0)
device_ = w_head_q.device
r_matrix = self.generate_relative_positions(q_len, k_len, device_, caching=incremental)
# T x T x H
r = self.position_embedding(r_matrix)
output, coverage = self.compute_attention(r, w_head_q, w_head_k, w_head_v, attn_mask=attn_mask, debug=debug)
return output, coverage, incremental_cache
def compute_attention(self, r, w_head_q, w_head_k, w_head_v, attn_mask=None, debug=False):
qlen, rlen, bsz = w_head_q.size(0), r.size(0), w_head_q.size(1)
rsize = r.size(1)
klen = w_head_k.size(0)
assert rlen >= klen # can allocate more relative positions than klen
w_head_q = w_head_q.contiguous().view(qlen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_k = w_head_k.contiguous().view(klen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_v = w_head_v.contiguous().view(klen, bsz * self.n_head, self.d_head).transpose(0, 1)
w_head_q.mul_(self.scale)
w_head_q = w_head_q.view(bsz, self.n_head, qlen, self.d_head)
w_head_k = w_head_k.view(bsz, self.n_head, klen, self.d_head)
w_head_v = w_head_v.view(bsz, self.n_head, klen, self.d_head)
qk_score = torch.matmul(w_head_q, w_head_k.transpose(2, 3))
w_head_q_t = w_head_q.permute(2, 0, 1, 3) # qlen x bsz x n_head x d_head
w_head_q_r = w_head_q_t.reshape(qlen, bsz * self.n_head, -1)
r_t = r.transpose(1, 2) # klen x dhead x klen
qr_score = torch.matmul(w_head_q_r, r_t)
qr_score = qr_score.reshape(klen, bsz, self.n_head, -1)
qr_score = qr_score.permute(1, 2, 0, 3)
attn_score = qk_score + qr_score
# [qlen x klen x bsz x n_head]
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
# compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None, :, :, None], -float('inf')).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:, :, :, None], -float('inf')).type_as(attn_score)
# [bsz x n_head x qlen x klen] again
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
# [bsz x n_head x qlen x klen] again
dtype_ = torch.float64 if double_precision else torch.float32
attn_prob = F.softmax(attn_score, dim=-1, dtype=dtype_).type_as(attn_score)
# nan may happen ... because of the first positions (aligned right) will have nothing to attend to
# nan_mask = torch.isnan(attn_prob)
# attn_prob = attn_prob.masked_fill(nan_mask, 0).type_as(attn_score)
coverage = attn_prob
attn_prob = self.dropatt(attn_prob)
context_org = torch.matmul(attn_prob, w_head_v)
attn_t = attn_prob.permute(2, 0, 1, 3)
attn_r = attn_t.reshape(klen, bsz * self.n_head, -1)
# what is r size?
context_pos = torch.matmul(attn_r, r)
context_pos = context_pos.reshape(klen, bsz, self.n_head, -1)
context_pos = context_pos.permute(1, 2, 0, 3)
attn_vec = context_org + context_pos
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.transpose(1, 2).contiguous().view(bsz, qlen, self.n_head * self.d_head)
# linear projection and transpose to T B D
attn_out = self.o_net(attn_vec).transpose(0, 1)
output = attn_out
return output, coverage
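# A small illustration (not part of the original file) of the clipped relative
# distance matrix used by LearnableRelMultiHeadAttn: entry (i, j) equals
# clamp(j - i, -max_len, max_len) + max_len, a valid embedding index.
def _relative_position_demo():
    attn = LearnableRelMultiHeadAttn(n_head=2, d_model=8, d_head=4, max_len=3)
    rel = attn.generate_relative_positions(4, 4, torch.device('cpu'))
    # for qlen = klen = 4 and max_len = 3 this yields:
    # tensor([[3, 4, 5, 6],
    #         [2, 3, 4, 5],
    #         [1, 2, 3, 4],
    #         [0, 1, 2, 3]])
    return rel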
if __name__ == '__main__':
bsz = 1
n_head = 8
tgt_len = 10
src_len = 6
qlen = 5
klen = 12
pos = torch.arange(klen - 1, -klen, -1.0).unsqueeze(1).expand(-1, bsz) # T x B
pos = pos.unsqueeze(0).expand(klen, -1, -1)
print(pos.size())
pos = _rel_shift(pos)
print(pos.size())
print(pos.squeeze(-1))
# x = torch.arange(klen - 1, -klen, -1.0).unsqueeze(0).repeat(qlen, 1)
# input = x.mul(10)
# print(input, input.size())
# x = torch.randint(0, 100, (tgt_len, tgt_len))
#
# print(x)
#
# tgt_tgt_mask = torch.triu(torch.ones(tgt_len, tgt_len), diagonal=1)
# tgt_src_mask = torch.zeros(tgt_len, src_len)
#
# tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
# print(tgt_mask)
# # print(attn_mask)
#
# src_src_mask = torch.zeros(src_len, src_len)
# src_tgt_mask = torch.ones(src_len, tgt_len)
#
# src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
#
# print(src_mask)
# print("FULL ATTENTION MASK")
# attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
#
# print(attn_mask)
| 18,254
| 35.148515
| 116
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/rezero.py
|
# Implementation of the ReZERO training strategy
import torch
import torch.nn as nn
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.tensor(1e-3))
self.fn = fn
    def forward(self, x):
        # ReZero residual: x + g * f(x); `fn` was stored but never applied in the original
        return x + self.fn(x) * self.g
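# A hedged usage sketch (not part of the original file): ReZero wraps a
# residual branch and gates it with g, which starts near zero so the block is
# approximately the identity at initialization.
def _rezero_demo():
    block = ReZero(nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8)))
    x = torch.randn(2, 8)
    out = block(x)
    assert torch.allclose(out, x, atol=0.1)  # g ~ 1e-3 keeps the branch small
    return out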
| 291
| 18.466667
| 49
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/swish.py
|
import torch
import torch.nn as nn
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from .optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .optimized.compat import custom_fwd, custom_bwd
try:
import silu_cuda
except (ModuleNotFoundError, ImportError) as e:
silu_cuda = None
class SwishFunction(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, inp):
ctx.save_for_backward(inp)
return silu_cuda.forward(inp)
@staticmethod
@custom_bwd
def backward(ctx, grad_out):
inp, = ctx.saved_tensors
if not ctx.needs_input_grad[0]: return (None,)
return silu_cuda.backward(inp, grad_out)
@half_function
def fast_silu(input):
return SwishFunction.apply(input)
class SiLU(nn.Module):
def __init__(self, inplace=False):
super(SiLU, self).__init__()
self.inplace = inplace
def forward(self, input):
# maybe only use during training to avoid kernel problem?
if silu_cuda is not None and input.is_cuda:
return fast_silu(input)
else:
try:
output = torch.nn.functional.silu(input, inplace=self.inplace)
except AttributeError:
output = input * torch.sigmoid(input)
return output
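# A quick equivalence check (not part of the original file): whichever backend
# is selected, the module should match silu(x) = x * sigmoid(x).
def _silu_demo():
    act = SiLU()
    x = torch.randn(4, 8)  # CPU tensor, exercising the fallback path
    assert torch.allclose(act(x), x * torch.sigmoid(x), atol=1e-6)
    return act(x)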
| 1,489
| 23.42623
| 78
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/__init__.py
|
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.base_seq2seq import Generator, NMTModel
from onmt.modules.static_dropout import StaticDropout
# For flake8 compatibility.
__all__ = ["MultiHeadAttention", "Generator", "NMTModel", "StaticDropout"]
| 262
| 36.571429
| 66
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/layer_norm.py
|
import math
import torch
import numbers
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
import importlib
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .optimized.compat import custom_fwd, custom_bwd
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
"""
Faster version of Layer Norm from apex (new)
"""
try:
import fast_layer_norm_cuda
# print("[INFO] Fast layer norm implementation detected.")
except (ModuleNotFoundError, ImportError) as e:
fast_layer_norm_cuda = None
# print("[INFO] Fast layer norm implementation not found.")
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.contiguous()
hidden_size = gamma.numel()
y, mu, rsigma = fast_layer_norm_cuda.ln_fwd(x, gamma, beta, epsilon)
ctx.save_for_backward(x, gamma, mu, rsigma)
ctx.need_weight_grad = gamma.requires_grad
return y
@staticmethod
def backward(ctx, dy):
# assert dy.is_contiguous()
dy = dy.contiguous() # this happens!
x, gamma, mu, rsigma = ctx.saved_tensors
dx, dgamma, dbeta, _, _ = fast_layer_norm_cuda.ln_bwd(dy, x, mu, rsigma, gamma)
# TODO: write bwd function that doesn't need backward
if not ctx.need_weight_grad:
dgamma = None
dbeta = None
return dx, dgamma, dbeta, None
"""
Fast version of Layer Norm from Apex
"""
def fast_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-5):
args = _cast_if_autocast_enabled(input, weight, bias, eps)
with torch.cuda.amp.autocast(enabled=False):
return FastLayerNormFN.apply(*args)
class FP32LayerNorm(torch.nn.Module):
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super().__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
eps = self.eps
return F.layer_norm(
input.float(), self.normalized_shape, self.weight, self.bias, eps).type_as(input)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
def layer_norm_func(input, weight, bias, normalized_shape, eps=1e-05):
if input.size(-1) in [768, 1024, 2048, 3072, 4096] and weight is not None \
and input.is_cuda and fast_layer_norm_cuda is not None:
return fast_layer_norm_affine(input, weight, bias, normalized_shape, eps)
else:
return F.layer_norm(input, normalized_shape, weight, bias, eps)
class LayerNorm(torch.nn.Module):
"""
See LayerNorm for details.
Note, however, that unlike LayerNorm this norm includes a batch component.
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super().__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input, fast=True):
eps = self.eps
if input.is_cuda and fast and fast_layer_norm_cuda is not None and input.size(-1) in [768, 1024, 2048, 3072, 4096]:
return fast_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, eps)
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
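# A hedged sanity check (not part of the original file): when the fused kernel
# is unavailable (e.g. on CPU), this LayerNorm falls back to F.layer_norm and
# should agree with torch.nn.LayerNorm at initialization.
def _layer_norm_demo():
    ln = LayerNorm(64)
    ref = torch.nn.LayerNorm(64)
    x = torch.randn(2, 5, 64)
    assert torch.allclose(ln(x), ref(x), atol=1e-6)
    return ln(x)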
class MultilingualLayerNorm(torch.nn.Module):
"""
See LayerNorm for details.
Note, however, that unlike LayerNorm this norm includes a batch component.
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True, n_languages=1):
super().__init__()
self.n_languages = n_languages
self.fused = False
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(self.n_languages, *self.normalized_shape))
self.bias = Parameter(torch.Tensor(self.n_languages, *self.normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input, factor):
eps = self.eps
if self.elementwise_affine:
weight = torch.index_select(self.weight, 0, factor).squeeze(0)
bias = torch.index_select(self.bias, 0, factor).squeeze(0)
else:
weight, bias = None, None
        # `fast_fused` was undefined in the original; guard on kernel availability instead
        fast_fused = fast_layer_norm_cuda is not None and input.size(-1) in [768, 1024, 2048, 3072, 4096]
        if not input.is_cuda or not fast_fused or not self.elementwise_affine:
            return F.layer_norm(
                input, self.normalized_shape, weight, bias, eps)
        return fast_layer_norm_affine(input, weight, bias, self.normalized_shape, eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
| 7,129
| 31.557078
| 123
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/attention.py
|
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import group_linear
class MultiHeadAttention(nn.Module):
"""Applies multi-head attentions to inputs (query, key, value)
Args:
h: number of heads
d_model: dimension of model
p: dropout probabolity
Params:
fc_query: FC layer to project query, d_model x (h x d_head)
fc_key: FC layer to project key, d_model x (h x d_head)
fc_value: FC layer to project value, d_model x (h x d_head)
fc_concat: FC layer to concat and project multiheads, d_model x (h x d_head)
Inputs Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Outputs Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, attn_p=0.1, static=False, share=3):
super(MultiHeadAttention, self).__init__()
self.h = h
self.d = d_model
self.share = share
assert d_model % h == 0
self.d_head = d_model // h
self.fc_query = Bottle(Linear(d_model, h * self.d_head, bias=False))
self.fc_key = Bottle(Linear(d_model, h * self.d_head, bias=False))
self.fc_value = Bottle(Linear(d_model, h * self.d_head, bias=False))
self.fc_concat = Bottle(Linear(h * self.d_head, d_model, bias=False))
self.sm = nn.Softmax(dim=-1)
if static:
self.attn_dropout = StaticDropout(attn_p)
else:
self.attn_dropout = nn.Dropout(attn_p)
def forward(self, query, key, value, mask,
incremental=False, incremental_cache=None):
len_query, b = query.size(0), query.size(1)
# batch_size*h x len_query x d_head
# project inputs to multi-heads
if self.share == 1:
# shared_qkv = group_linear(
# [self.fc_query.function.linear, self.fc_key.function.linear, self.fc_value.function.linear], query)
# proj_query, proj_key, proj_value = shared_qkv.chunk(3, dim=-1)
proj_query = self.fc_query(query)
proj_key = self.fc_key(key)
proj_value = self.fc_value(value)
# In incremental case: we concatenate the previously computed (mapped) states to the proj_key and proj_v
if incremental:
if 'k' in incremental_cache and 'v' in incremental_cache:
proj_key = torch.cat([incremental_cache['k'], proj_key], dim=0) # time first
incremental_cache['k'] = proj_key
proj_value = torch.cat([incremental_cache['v'], proj_value], dim=0) # time first
incremental_cache['v'] = proj_value
len_key, b_ = proj_key.size(0), proj_key.size(1)
else:
incremental_cache['k'] = proj_key
incremental_cache['v'] = proj_value
elif self.share == 2:
# This function will have to change in the future for Transformer XL
proj_query = self.fc_query(query) # batch_size x len_query x h*d_head
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
proj_key = incremental_cache['c_k']
proj_value = incremental_cache['c_v']
else:
# shared_kv = group_linear([self.fc_key.function.linear, self.fc_value.function.linear], key)
# proj_key, proj_value = shared_kv.chunk(2, dim=-1)
proj_key = self.fc_key(key)
proj_value = self.fc_value(value)
if incremental:
incremental_cache['c_k'] = proj_key
incremental_cache['c_v'] = proj_value
else:
proj_query = self.fc_query(query)
proj_key = self.fc_key(key) # batch_size x len_key x h*d_head
proj_value = self.fc_value(value) # batch_size x len_key x h*d_head
q, k, v = proj_query, proj_key, proj_value
len_key, b_ = k.size(0), k.size(1)
# prepare the shape for applying softmax
q = q.contiguous().view(len_query, b * self.h, self.d_head).transpose(0, 1)
k = k.contiguous().view(len_key, b * self.h, self.d_head).transpose(0, 1)
v = v.contiguous().view(len_key, b * self.h, self.d_head).transpose(0, 1)
q = q * (self.d_head ** -0.5)
# get dotproduct softmax attns for each head
attns = torch.bmm(q, k.transpose(1, 2)) # batch_size*h x len_query x len_key
attns = attns.view(b, self.h, len_query, len_key)
if mask is not None:
mask_ = mask.unsqueeze(-3)
# FP16 support: cast to float and back
attns = attns.float().masked_fill_(mask_, -float('inf')).type_as(attns)
dtype_ = torch.float64 if onmt.constants.double_precision else torch.float32
attns = F.softmax(attns, dim=-1, dtype=dtype_).type_as(attns)
# return mean attention from all heads as coverage
coverage = torch.mean(attns, dim=1)
attns = self.attn_dropout(attns)
attns = attns.view(b * self.h, len_query, len_key)
# apply attns on value
out = torch.bmm(attns, v) # batch_size*h x len_query x d_head
out = out.transpose(0, 1).contiguous().view(len_query, b, self.d)
out = self.fc_concat(out)
return out, coverage
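# A hedged usage sketch (not part of the original file); note the time-first
# layout (len x batch x d_model) used throughout this module. Assumes the
# onmt imports at the top of the file resolve.
def _multi_head_attention_demo():
    mha = MultiHeadAttention(h=4, d_model=32, attn_p=0.1)
    q = torch.randn(5, 2, 32)   # len_query x batch x d_model
    kv = torch.randn(7, 2, 32)  # len_key x batch x d_model
    out, coverage = mha(q, kv, kv, mask=None)
    assert out.shape == (5, 2, 32) and coverage.shape == (2, 5, 7)
    return out, coverage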
| 5,883
| 40.43662
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/lsh_attention.py
|
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. Currently not working and might not worth trying for ASR"""
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
class ReverseSort(Function):
"""
After chunked attention is applied which sorted clusters,
original ordering has to be restored.
Since customized backward function is used for Reformer (because of reversible network)
the gradients of the output vectors have to be explicitely
sorted here.
Implementation note for myself: the number of forward arguments (except ctx) needs to match the backward outputs
the number of backward arguments (except ctx) must match the forward output
"""
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
# save sorted_bucket_idx for backprop
with torch.no_grad():
ctx.sorted_bucket_idx = sorted_bucket_idx
# undo sort to have correct order for next layer
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
return out_vectors, logits
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
# get parameters saved in ctx
sorted_bucket_idx = ctx.sorted_bucket_idx
expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
# reverse sort of forward
grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)
grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
# return grad and `None` fillers for last 2 forward args
return grad_out_vectors, grad_logits, None, None
class EfficientAttentionMixin:
"""
A few utilities for nn.Modules in Reformer, to be used as a mixin.
"""
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
""" Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
        Returns:
            tensor of shape [batch_size, num_attention_heads, n_chunks, N * chunk_len, ...], where
            N = (1 + num_chunks_before + num_chunks_after).
"""
# if we don't look at the previous chunk or next chunk then return
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
# if i > 0:
# the first term is all chunks from i. the second term is the last i chunks
# if i < 0:
# the first term is the last |i| chunks. the second term is all chunks before |i|
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
# remove the last dim (hidden size) and add two more dims
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
# output size : [bsz x num_heads x seq_len x d_head]
return x.transpose(2, 1)
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
"""
merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
# x should have size: batch_size * n_heads * length * head_size
x = x.permute(0, 2, 1, 3)
return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
"""
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
"""
batch_size = vectors.shape[0]
split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
if len(vectors.shape) == 4:
return torch.reshape(vectors, split_dim_shape + (attn_head_size,))
elif len(vectors.shape) == 3:
return torch.reshape(vectors, split_dim_shape)
else:
raise ValueError("Input vector rank should be one of [3, 4], but is: {}".format(len(vectors.shape)))
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.chunk_length = opt.chunk_length
self.num_hashes = opt.num_hashes
self.num_buckets = None
self.num_chunks_before = opt.lsh_num_chunks_before
self.num_chunks_after = opt.lsh_num_chunks_after
self.dropout = opt.attn_dropout
self.n_heads = opt.n_heads
self.model_size = opt.model_size
self.d_head = self.model_size // self.n_heads
self.query_key = nn.Linear(self.model_size, self.model_size, bias=False)
self.values = nn.Linear(self.model_size, self.model_size, bias=False)
self.value_out = nn.Linear(self.model_size, self.model_size, bias=False)
def _set_num_buckets(self, seq_len):
# num buckets should be set to 2 * seq_len // chunk_length (recommended in paper)
num_buckets_pow_2 = (2 * (seq_len // self.chunk_length)).bit_length() - 1
num_buckets = 2 ** num_buckets_pow_2
self.num_buckets = num_buckets
return num_buckets
def _hash_vectors(self, vectors, num_hashes, attn_mask):
batch_size = vectors.shape[0]
assert self.num_buckets % 2 == 0
rotation_size = self.num_buckets
num_buckets = self.num_buckets
        # remove the gradients: bucket assignment is a discrete decision, so no gradient flows through the hashing
vectors = vectors.detach()
# [n_head x d_head x num_hashes x rotation_size//2]
rotations_shape = (self.n_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
# create a random self.attention_head_size x num_hashes x num_buckets/2
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
# rotated vectors: [bsz x n_head x num_hashes x seq_len x num_buckets/2]
rotated_vectors = torch.einsum('bhtd,hdnr->bhntr', vectors, random_rotations)
# TODO: understand why they only randomize for half of the buckets and take the negative for the other half
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
# add an extra bucket for padding tokens only
if attn_mask is not None:
num_buckets = num_buckets + 1
# assign padding tokens extra bucket
            buckets_mask = attn_mask.to(torch.bool)[:, None, None, :].expand(buckets.shape)
            # torch.Tensor(...) does not accept dtype/device kwargs; build a scalar tensor instead
            buckets = torch.where(buckets_mask, buckets,
                                  torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device))
# buckets is now [bsz x n_head x num_hashes x seq_len]
# next we add offset so that bucket numbers from different hashing rounds don't overlap
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view(1, 1, -1, 1)
# expand to batch_size and n_head
offsets = offsets.expand((batch_size, self.n_heads) + offsets.shape[-2:])
offsets_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offsets_buckets
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, seq_len, buckets, num_hashes):
with torch.no_grad():
# buckets should have size [bsz x nhead x (num_hashes * seq_len)]
batch_size = buckets.shape[0]
# arange and expand to get the original indices
orig_indices = torch.arange(num_hashes * seq_len, device=buckets.device).view(1, 1, -1)
orig_indices = orig_indices.expand(batch_size, self.n_heads, orig_indices.shape[-1])
            # scale buckets so that argsort orders primarily by bucket id
            # and secondarily by the original sequence position
            scaled_buckets = seq_len * buckets + (orig_indices % seq_len)
scaled_buckets = scaled_buckets.detach()
# hash-based sort
            # this should have size [bsz x nhead x (num_hashes * seq_len)]
sorted_bucket_idx = torch.argsort(scaled_buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.d_head)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
    def _compute_attn_mask(self, query_indices, key_indices, attn_mask, query_key_dot_shape, seq_len):
        # attention mask for LSH
        if attn_mask is not None:
            # if chunked attention, the mask has to correspond to LSH order
            assert attn_mask.dim() == 2
            if seq_len > self.chunk_length:
                if attn_mask.dim() < 3:
                    attn_mask = attn_mask.unsqueeze(1)  # [ batch_size, 1, seq_len ]
                attn_mask = attn_mask.expand(query_indices.shape[:-1] + (-1, ))
                attn_mask = torch.gather(attn_mask, -1, key_indices)
                attn_mask = attn_mask.unsqueeze(-2).expand(query_key_dot_shape)
        # we don't really need a causal mask here; the bare `return` in the original dropped the computed mask
        return attn_mask
    def _attend(self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash,
                attn_mask, seq_len):
# look at previous and following chunks if chunked attention
if self.chunk_length < seq_len:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
# get logits from dot-product
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free mem
# del query_vectors, key_vectors
# if chunked attention split bucket ids to query and key
if self.chunk_length < seq_len:
query_bucket_idx = self._split_seq_length_dim_to(
sorted_bucket_idx_per_hash, -1, self.chunk_length, self.n_heads
)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
        # gather the attention mask into the sorted (bucketed) order
        mask = self._compute_attn_mask(query_bucket_idx, key_value_bucket_idx, attn_mask, query_key_dots.shape, seq_len)
# # apply self-mask
# # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
# # " While attention to the future is not allowed, typical implementations of the
# # Transformer do allow a position to attend to itself.
# # Such behavior is undesirable in a shared-QK formulation because the dot-product
# # of a query vector with itself will almost always be greater than the dot product of a
# # query vector with a vector at another position. We therefore modify the masking
# # to forbid a token from attending to itself, except in situations
# # where a token has no other valid attention targets (e.g. the first token in a sequence) "
# self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)) \
# .to(query_bucket_idx.device)
if mask is not None:
query_key_dots = query_key_dots.float().masked_fill_(mask, -float('inf')).type_as(query_key_dots)
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
attn_probs = torch.exp(query_key_dots - logits)
# free mem
# del query_key_dots
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
# attend values
out_vectors = torch.matmul(attn_probs, value_vectors)
# free memory
# del value_vectors
# merge chunk
if self.chunk_length < seq_len:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return out_vectors, logits, attn_probs
def forward(self, x, attn_mask, buckets=None, **kwargs):
"""
:param x: hidden states / embeddings of the previous layer [bsz x seq_len x hidden_size]
:param attn_mask: attention mask
        :param buckets: optional precomputed hash bucket assignments (computed from the query/key projections when None)
:param kwargs:
:return:
"""
batch_size, seq_len = x.size(0), x.size(1)
        if attn_mask is not None:
            # squeeze a trailing singleton dim if the mask comes in as [bsz x seq_len x 1]
            if len(attn_mask.shape) == 3:
                attn_mask = attn_mask.squeeze(-1)
num_hashes = self.num_hashes
query_key = self.query_key(x)
values = self.values(x)
# del x
query_key = self._split_hidden_size_dim(query_key, self.n_heads, self.d_head)
values = self._split_hidden_size_dim(values, self.n_heads, self.d_head)
        # LSH attention only makes sense if chunked attention should be performed
if self.chunk_length < seq_len:
# hash
num_buckets = self._set_num_buckets(seq_len)
if buckets is None:
buckets = self._hash_vectors(query_key, num_hashes, attn_mask)
assert (
int(buckets.shape[-1]) == num_hashes * seq_len
), "last dim of buckets is {}, but should be {}".format(buckets.shape[-1], num_hashes * seq_len)
sorted_bucket_idx, undo_sorted_bucket_idx = \
self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(seq_len, buckets, num_hashes)
# make sure that bucket_idx is not longer than seq_len
sorted_bucket_idx_per_hash = sorted_bucket_idx % seq_len
# cluster query key vectors according to hashed buckets:
query_key_vectors = self._gather_by_expansion(query_key, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(values, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(query_key_vectors, -1, self.chunk_length,
self.n_heads, self.d_head)
value_vectors = self._split_seq_length_dim_to(value_vectors, -1, self.chunk_length,
self.n_heads, self.d_head)
else:
sorted_bucket_idx_per_hash = torch.arange(seq_len, device=query_key.device).repeat(
batch_size, self.n_heads, 1
)
query_key_vectors = query_key
value_vectors = values
# scale the key vectors
key_vectors = query_key_vectors * (self.d_head ** -0.5)
# get attention probabilities
out_vectors, logits, attention_probs = self._attend(
query_key_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attn_mask,
seq_len=seq_len
)
# free memory
# del query_key_vectors, key_vectors, value_vectors
# re-order out-vectors and logits
if self.chunk_length < seq_len:
# sort the clusters back to correct ordering
# out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(out_vectors, num_hashes, seq_len, self.n_heads, self.d_head)
logits = self._split_seq_length_dim_to(logits, num_hashes, seq_len, self.n_heads, self.d_head)\
.unsqueeze(-1)
prob_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * prob_vectors, dim=2)
# free mem
# del prob_vectors
# del logits
assert out_vectors.shape == (
batch_size,
self.n_heads,
seq_len,
self.d_head
), "out_vectors have be of shape `[batch_size, n_head, seq_len, d_head]`."
        out_vectors = self._merge_hidden_size_dims(out_vectors, self.n_heads, self.d_head)  # the extra seq_len arg did not match the signature
out_vectors = self.value_out(out_vectors)
return out_vectors, attention_probs
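# A hedged usage sketch (not part of the original file). `opt` is a stand-in
# config object; its field names simply mirror the attributes read in __init__.
# With seq_len <= chunk_length the module runs full (unhashed) attention.
def _lsh_attention_demo():
    from types import SimpleNamespace
    opt = SimpleNamespace(chunk_length=16, num_hashes=1,
                          lsh_num_chunks_before=1, lsh_num_chunks_after=0,
                          attn_dropout=0.0, n_heads=4, model_size=32)
    attn = LSHSelfAttention(opt)
    x = torch.randn(2, 8, 32)  # bsz x seq_len x hidden_size
    out, probs = attn(x, attn_mask=None)
    assert out.shape == x.shape
    return out, probs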
| 18,617
| 41.027088
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/static_dropout.py
|
import torch
from torch.autograd.function import InplaceFunction, Function
from torch.autograd import Variable
from itertools import repeat
import torch.nn as nn
class StaticDropoutFunction(Function):
@staticmethod
def forward(ctx, input, module, train=False):
ctx.train = train
ctx.module = module
ctx.p = module.p
if ctx.p == 0 or not ctx.train:
return input
if torch.numel(module.noise) != torch.numel(input):
module.gen_noise(input)
ctx.noise = module.noise
output = input * ctx.noise
return output
@staticmethod
def backward(ctx, grad_output):
#~ print("BACKWARD PASS")
ctx.module.noise_created = False
ctx.module.noise = None
if ctx.p > 0 and ctx.train:
return grad_output * ctx.noise, None, None
else:
return grad_output, None, None
class StaticDropout(nn.Module):
def __init__(self, p=0.5):
super(StaticDropout, self).__init__()
if p < 0 or p > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
self.noise_created = False
def gen_noise(self, input):
self.noise = input.new().resize_as_(input)
if self.p == 1:
self.noise.fill_(0)
else:
self.noise.bernoulli_(1 - self.p).div_(1 - self.p)
self.noise = self.noise.expand_as(input)
self.noise_created = True
def forward(self, input):
        if not self.noise_created and self.training:
self.gen_noise(input)
#~ self.noise = input.new().resize_as_(input)
#~ if self.p == 1:
#~ self.noise.fill_(0)
#~ else:
#~ self.noise.bernoulli_(1 - self.p).div_(1 - self.p)
#~ self.noise = self.noise.expand_as(input)
#~ self.noise_created = True
return StaticDropoutFunction.apply(input, self, self.training)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'p=' + str(self.p) + ')'
| 2,235
| 28.421053
| 78
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/nce/nce_loss.py
|
"""NCE Implementation from https://github.com/Stonesjtu/Pytorch-NCE"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import onmt
class NCELoss(_Loss):
def __init__(self, hidden_size, output_size, noise_ratio=256, logz=1, label_smoothing=0.0):
super(NCELoss, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.noise_ratio = noise_ratio
self.padding_idx = onmt.constants.PAD
self.smoothing_value = label_smoothing / (self.noise_ratio+1)
self.confidence = 1.0 - label_smoothing
self.label_smoothing = label_smoothing
        # note: log Z is hard-coded here, overriding the `logz` constructor argument
        self.logz = 8
try:
from apex.contrib import xentropy as label_smoothing
self.softmax_xentropy = label_smoothing.SoftmaxCrossEntropyLoss.apply
except (ModuleNotFoundError, AttributeError):
print("Fast xentropy cannot be found. Reinstalling apex with --xentropy is probably required.")
exit()
def forward(self, model_outputs, targets, **kwargs):
if self.training:
scores_model_target = model_outputs['scores_model_target'].float()
scores_model_noise = model_outputs['scores_model_noise'].float()
logprob_noise_target, logprob_noise_noise = \
model_outputs['logprob_noise_target'].float(), \
model_outputs['logprob_noise_noise'].float()
# remove masking
gtruth = targets.view(-1)
non_pad_mask = gtruth.ne(self.padding_idx)
non_pad_indices = torch.nonzero(non_pad_mask, as_tuple=False).squeeze(1)
scores_model_target = scores_model_target.index_select(0, non_pad_indices) # bsz x 1
scores_model_noise = scores_model_noise.index_select(0, non_pad_indices) # bsz x K
logprob_noise_target = logprob_noise_target.index_select(0, non_pad_indices) # bsz x 1
logprob_noise_noise = logprob_noise_noise.index_select(0, non_pad_indices) # bsz x K
logit_model = torch.cat([scores_model_target, scores_model_noise], dim=1) - self.logz
logit_noise = torch.cat([logprob_noise_target, logprob_noise_noise], dim=1)
# prob_noise = logprob_noise.exp()
# logtrue = logprob.exp() / (prob_noise + self.noise_ratio * prob_noise)
# logtrue = torch.log(logtrue) # bsz x [K + 1]
logit_true = logit_model - logit_noise - math.log(self.noise_ratio)
# e^-x = e(-log_model + logit_noise + math.log(noise))
# 1 / p_model * p_noise * K
# e^-x + 1 = ( p_model + k p_noise ) / p_model
label = torch.zeros_like(logit_true).add_(self.smoothing_value)
label[:, 0].fill_(self.confidence)
loss = F.binary_cross_entropy_with_logits(logit_true, label, None, pos_weight=None, reduction='sum')
# loss.div_(self.noise_ratio + 1)
loss_data = loss.data.item()
# output_dict = {"loss": loss, "data": loss_data,
# "rev_loss": None, "rev_loss_data": None, "mirror_loss": None,
# "rec_loss": None, "rec_loss_data": None}
else:
# return cross entropy
logits = model_outputs['logprobs']
gtruth = targets.view(-1)
half_to_float = (logits.dtype == torch.half)
            label_smoothing = 0.0  # this branch only runs during evaluation
loss = self.softmax_xentropy(logits.view(-1, logits.size(-1)), gtruth,
label_smoothing, self.padding_idx, half_to_float)
loss = loss.sum()
loss_data = loss.data.item()
output_dict = {"loss": loss, "data": loss_data,
"rev_loss": None, "rev_loss_data": None, "mirror_loss": None,
"rec_loss": None, "rec_loss_data": None}
return output_dict
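
# Editor's sketch of the NCE posterior computed above (hypothetical numbers):
# P(true | w) = p_model(w) / (p_model(w) + K * p_noise(w)), whose logit is
# log p_model(w) - log p_noise(w) - log K, i.e. `logit_true`.
def _nce_logit_demo():
    K = 4
    log_p_model = torch.tensor([-2.0])
    log_p_noise = torch.tensor([-3.0])
    logit_true = log_p_model - log_p_noise - math.log(K)
    p_true = torch.sigmoid(logit_true)
    expected = log_p_model.exp() / (log_p_model.exp() + K * log_p_noise.exp())
    assert torch.allclose(p_true, expected)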
| 4,012
| 41.691489
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/nce/nce_utils.py
|
import torch
import onmt
def build_unigram_noise(freq, alpha=1.0):
"""
:param alpha: scaling factor. 0.0 = uniform distribution
:param freq: torch tensor with frequencies of each word
:return: torch tensor - probability distribution (multinomial distribution)
"""
probs = freq.new(*freq.size()).copy_(freq)
# don't sample PAD or BOS
probs[onmt.constants.PAD] = 0
probs[onmt.constants.BOS] = 0
probs = probs / probs.sum()
probs = torch.pow(probs, alpha)
probs = probs / probs.sum()
return probs
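
# Minimal usage sketch (editor's addition; assumes onmt.constants.PAD and
# onmt.constants.BOS are valid indices into `freq`):
def _unigram_noise_demo():
    freq = torch.tensor([3.0, 1.0, 5.0, 2.0, 9.0])
    probs = build_unigram_noise(freq, alpha=0.75)
    assert torch.isclose(probs.sum(), torch.tensor(1.0))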
| 580
| 23.208333
| 79
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/nce/nce_linear.py
|
"""An index linear class for generic NCE module"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import isclose
BACKOFF_PROB = 1e-10  # floor to keep log-probabilities finite
class AliasMultinomial(torch.nn.Module):
''' Alias sampling method to speedup multinomial sampling
The alias method treats multinomial sampling as a combination of uniform sampling and
bernoulli sampling. It achieves significant acceleration when repeatedly sampling from
the saved multinomial distribution.
Attributes:
- probs: the probability density of desired multinomial distribution
Refs:
- https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
'''
def __init__(self, probs):
super(AliasMultinomial, self).__init__()
assert isclose(probs.sum().item(), 1), 'The noise distribution must sum to 1'
cpu_probs = probs.cpu()
K = len(probs)
# such a name helps to avoid the namespace check for nn.Module
self_prob = [0] * K
self_alias = [0] * K
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for idx, prob in enumerate(cpu_probs):
self_prob[idx] = K*prob
if self_prob[idx] < 1.0:
smaller.append(idx)
else:
larger.append(idx)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self_alias[small] = large
self_prob[large] = (self_prob[large] - 1.0) + self_prob[small]
if self_prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self_prob[last_one] = 1
self.register_buffer('prob', torch.Tensor(self_prob))
self.register_buffer('alias', torch.LongTensor(self_alias))
def draw(self, *size):
"""Draw N samples from multinomial
Args:
- size: the output size of samples
"""
max_value = self.alias.size(0)
kk = self.alias.new(*size).random_(0, max_value).long().view(-1)
prob = self.prob[kk]
alias = self.alias[kk]
# b is whether a random number is greater than q
b = torch.bernoulli(prob).long()
oq = kk.mul(b)
oj = alias.mul(1 - b)
return (oq + oj).view(size)
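
# Editor's sketch (illustrative): samples drawn from AliasMultinomial should
# empirically reproduce the target distribution, up to sampling noise.
def _alias_demo():
    probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
    sampler = AliasMultinomial(probs)
    samples = sampler.draw(100000)
    empirical = torch.bincount(samples, minlength=4).float() / samples.numel()
    assert torch.allclose(empirical, probs, atol=0.01)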
class NCELinear(nn.Module):
def __init__(self, hidden_size, output_size, fix_norm=False,
noise_distribution=None, noise_ratio=32, shared_noise=False):
super().__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.linear = nn.Linear(hidden_size, output_size)
self.fix_norm = fix_norm # shouldn't use it with large vocab size
self.noise_ratio = noise_ratio
self.shared_noise = shared_noise
noise_distribution.clamp_(min=BACKOFF_PROB)
self.alias = AliasMultinomial(noise_distribution)
self.register_buffer('logprob_noise', noise_distribution.log())
def sample_noise(self, len_seq, bsz):
        if self.shared_noise:
            # one set of K noise samples is shared across all positions
            noise_samples = self.alias.draw(self.noise_ratio)
            logprob_noise_noise = self.logprob_noise[noise_samples].view_as(noise_samples)
            # return sizes [K] and [K]
            return noise_samples, logprob_noise_noise
else:
noise_size = (len_seq * bsz, self.noise_ratio)
# [B x K]
noise_samples = self.alias.draw(*noise_size)
# [B x K]
logprob_noise_noise = self.logprob_noise[noise_samples.view(-1)].view_as(noise_samples)
return noise_samples, logprob_noise_noise
def _compute_sample_logits(self, input, weight, bias, target_idx, noise_idx):
"""
:param input: [bsz x hidden_size]
:param target_idx: [bsz]
:param noise_idx: [bsz x K]
:return:
"""
# bsz x hidden_size -> bsz x 1 x hidden_size
input = input.unsqueeze(1)
# bsz -> bsz x 1
target_idx = target_idx.unsqueeze(1)
indices = torch.cat([target_idx, noise_idx], dim=-1) # bsz x (K+1)
# bsz x (K+1) x H
emb_weights = weight.index_select(0, indices.view(-1)).view(*indices.size(), -1)
emb_bias = bias.index_select(0, indices.view(-1)).view(*indices.size())
# element wise multiplication into [bsz x (1 + K)]
logits = torch.sum(torch.mul(input, emb_weights), dim=2) + emb_bias
scores_model_target, scores_model_noise = logits[:, 0].unsqueeze(-1), logits[:, 1:]
# return size [bsz x 1], [bsz x K]
return scores_model_target, scores_model_noise
def _compute_sample_logits_shared(self, input, weight, bias, target_idx, noise_idx):
"""
:param input: [bsz x hidden_size]
:param target_idx: [bsz]
:param noise_idx: [K]
:return:
"""
emb_weights = weight.index_select(0, target_idx) # bsz x hidden_size
emb_bias = bias.index_select(0, target_idx) # bsz
# [bsz*len_seq x hidden_size] x [bsz*len_seq x hidden_size]
scores_model_target = torch.sum(torch.mul(input, emb_weights), dim=1) + emb_bias # bsz
# print(noise_idx.size()) should be K
noise_weights = weight.index_select(0, noise_idx) # K x hidden_size
noise_bias = bias.index_select(0, noise_idx) # K
# [bsz*len_seq x hidden_size] \times [hidden_size \times K] -> [bsz*len_seq x K]
scores_model_noise = torch.addmm(noise_bias, input, noise_weights.t())
# return [bsz*len_seq x 1] and [bsz*len_seq x K]
return scores_model_target.unsqueeze(1), scores_model_noise
def forward(self, output_dicts):
"""
:param output_dicts: dictionary
:return:
"""
input = output_dicts['hidden']
# for this output layer we need a target during training to compute scores for them specifically
target = output_dicts['target'] if 'target' in output_dicts else None
        fix_norm = self.fix_norm
        # for a large vocabulary, this option increases memory cost by H x V x 2
        weight = F.normalize(self.linear.weight, dim=-1) if fix_norm else self.linear.weight
bias = self.linear.bias
if self.training:
seq_len, bsz = input.size(0), input.size(1)
# reshape input and target to 2D and 1D
input = input.view(seq_len * bsz, input.size(-1))
target = target.view(seq_len * bsz)
# sample noises from the noise distribution
noises, logprob_noise_noise = self.sample_noise(seq_len, bsz)
# logprob_noise_noise = self.logprob_noise[noises.view(-1)].view_as(noises) # bsz*len_seq x K
logprob_noise_target = self.logprob_noise[target].unsqueeze(1) # bsz*len_seq x 1
if self.shared_noise:
# compute scores for targets and noises
scores_model_target, scores_model_noise = self._compute_sample_logits_shared(input, weight, bias,
target, noises)
# [1 x K] to [bsz*len_seq x K]
# scores_model_noise = scores_model_noise.expand(scores_model_target.size(0), self.noise_ratio)
logprob_noise_noise = logprob_noise_noise.\
unsqueeze(0).expand(scores_model_target.size(0), self.noise_ratio)
else:
scores_model_target, scores_model_noise = self._compute_sample_logits(input, weight, bias,
target, noises)
# logprob_noise_noise should have size [len_seq*bsz x noise_ratio] already
output_dicts['logprob_noise_noise'] = logprob_noise_noise
output_dicts['logprob_noise_target'] = logprob_noise_target
output_dicts['scores_model_target'] = scores_model_target
output_dicts['scores_model_noise'] = scores_model_noise
# return scores_model_target, scores_model_noise, logprob_noise_target, logprob_noise_noise
else:
logits = F.linear(input, weight, bias)
output_dicts['logits'] = logits
return output_dicts
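
# Editor's note (illustrative shapes): with shared_noise=True, one set of K
# noise words is scored against every position with a single addmm, giving
# scores_model_target of shape [T*B x 1] and scores_model_noise of [T*B x K];
# with shared_noise=False, each position instead draws its own K noise words.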
| 9,215
| 38.553648
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/nce/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_partitioned/linear.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import math
class MPLinear(torch.nn.Module):
"""
A linear layer with partitioned weights
"""
# TODO: write gradcheck testing
def __init__(self, input_size, output_size, factor_size):
super().__init__()
self.factor_size = factor_size
self.input_size = input_size
self.output_size = output_size
self.shared_weight = torch.nn.Parameter(torch.Tensor(output_size * input_size, factor_size))
self.shared_bias = torch.nn.Parameter(torch.Tensor(output_size, factor_size))
self.reset_parameters()
def reset_parameters(self, init='normal'):
if init == 'normal':
std_ = math.sqrt(2.0 / (self.input_size + self.output_size))
torch.nn.init.normal_(self.shared_weight, 0.0, std_)
else:
std_ = math.sqrt(6.0 / (self.input_size + self.output_size))
torch.nn.init.uniform_(self.shared_weight, -std_, std_)
nn.init.constant_(self.shared_bias, 0.)
# for batch ensemble we init r_i and s_i with random sign vectors
def forward(self, input, factor):
"""
:param input: T x B x H
:param indices: H (shared factor for the whole minibatch)
:return:
"""
assert factor.ndim == 1 and factor.size(0) == self.factor_size
weight = torch.mv(self.shared_weight, factor).view(self.output_size, self.input_size)
bias = torch.mv(self.shared_bias, factor)
input = F.linear(input, weight, bias)
return input
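
# Editor's sketch (illustrative): torch.mv(shared_weight, factor) builds the
# effective weight as a linear combination of factor_size base matrices.
def _mp_linear_demo():
    layer = MPLinear(input_size=4, output_size=3, factor_size=2)
    factor = torch.tensor([0.5, 0.5])
    x = torch.randn(7, 2, 4)  # T x B x H
    y = layer(x, factor)
    assert y.shape == (7, 2, 3)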
# Multilingual Factorized Weight
class MPPositionWiseFeedForward(torch.nn.Module):
"""
Multilingually Partitioned Position Wise Feedforward model
"""
def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu',
factor_size=8, rank_size=-1):
super().__init__()
self.input_linear = MPLinear(model_size, inner_size, factor_size)
self.output_linear = MPLinear(inner_size, model_size, factor_size)
self.variational = variational
self.dropout = dropout
self.activation = activation
self.factor_size = factor_size
if rank_size == -1:
rank_size = factor_size
self.rank_size = rank_size
# self.factor_to_rank = nn.Linear(self.factor_size, self.rank_size)
if self.variational:
from onmt.modules.dropout import variational_dropout
self.dropout_function = variational_dropout
else:
self.dropout_function = F.dropout
    def forward(self, hidden, factor):
        # factor = self.factor_to_rank(factor)
        hidden = self.input_linear(hidden, factor)
        # NOTE: ReLU is hard-coded here; the `activation` argument is stored
        # but not dispatched on yet
        hidden = F.relu(hidden, inplace=True)
        hidden = self.dropout_function(hidden, p=self.dropout, training=self.training)
        hidden = self.output_linear(hidden, factor)
        return hidden
def reset_parameters(self, init='normal'):
self.input_linear.reset_parameters(init)
self.output_linear.reset_parameters(init)
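
# Minimal usage sketch for the partitioned feed-forward block (editor's
# addition, CPU-friendly):
def _mp_ffn_demo():
    ffn = MPPositionWiseFeedForward(model_size=8, inner_size=16, factor_size=4)
    x = torch.randn(5, 2, 8)  # T x B x H
    factor = torch.randn(4)
    out = ffn(x, factor)
    assert out.shape == x.shape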
if __name__ == "__main__":
bsz = 16
seq_len = 6
input_size = 16
output_size = 32
ensemble = 72
rank = 2
input = torch.randn((seq_len, bsz, input_size), requires_grad=True)
weight = torch.randn((output_size, input_size), requires_grad=True)
bias = torch.randn((output_size,), requires_grad=True)
r = torch.randn((bsz, rank, input_size), requires_grad=True)
s = torch.randn((bsz, rank, output_size), requires_grad=True)
function = BatchEnsembleLinearFunction.apply
input = input.double().cuda()
weight = weight.double().cuda()
bias = bias.double().cuda()
r = r.double().cuda()
s = s.double().cuda()
print("Gradchecking ...")
torch.autograd.gradcheck(function, (input, weight, bias, r, s))
| 3,881
| 30.306452
| 100
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_partitioned/relative_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.relative_self_attention_func import relative_self_attn_func
class MPRelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., factor_size=8, rank_size=-1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.factor_size = factor_size
if rank_size == -1:
rank_size = factor_size
self.rank_size = rank_size
self.factor_to_rank = nn.Linear(self.factor_size, self.rank_size)
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = True
self.in_proj_weight = Parameter(torch.Tensor(embed_dim * 3 * embed_dim, factor_size))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim * embed_dim, factor_size))
self.pos_proj_weight = Parameter(torch.Tensor(embed_dim * embed_dim, factor_size))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim, factor_size))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim, factor_size))
self.pos_proj_bias = Parameter(torch.Tensor(embed_dim, factor_size))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads * self.head_dim, factor_size))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads * self.head_dim, factor_size))
self.reset_parameters()
self.attn_func = relative_self_attn_func
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
def forward(self, input, pos, factor=None, key_padding_mask=None, attn_mask=None, mems=None,
incremental=False, incremental_cache=None):
# factor = self.factor_to_rank(factor)
embed_dim = self.embed_dim
in_proj_weight = torch.mv(self.in_proj_weight, factor).view(embed_dim * 3, embed_dim)
pos_proj_weight = torch.mv(self.pos_proj_weight, factor).view(embed_dim, embed_dim)
out_proj_weight = torch.mv(self.out_proj_weight, factor).view(embed_dim, embed_dim)
in_proj_bias = torch.mv(self.in_proj_bias, factor)
pos_proj_bias = torch.mv(self.pos_proj_bias, factor)
out_proj_bias = torch.mv(self.out_proj_bias, factor)
r_w_bias = torch.mv(self.r_w_bias, factor).view(self.num_heads, self.head_dim)
r_r_bias = torch.mv(self.r_r_bias, factor).view(self.num_heads, self.head_dim)
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
is_training = self.training
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, self.dropout,
incremental, incremental_cache, False, False)
# last False is double precision
return outputs, coverage
| 4,563
| 41.654206
| 106
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_partitioned/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_partitioned/encdec_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.encdec_attention_func import encdec_attn_func
class MPEncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0., factor_size=8, rank_size=-1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
self.factor_size = factor_size
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
if rank_size == -1:
rank_size = factor_size
self.rank_size = rank_size
# factor size is the size of the language factor
# rank size is to reduce the language factor size to a manageable number of parameters
self.factor_to_rank = nn.Linear(self.factor_size, self.rank_size)
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim * embed_dim, rank_size))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim * embed_dim, rank_size))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim * embed_dim, rank_size))
self.in_proj_bias_q = None
self.in_proj_bias_kv = None
self.out_proj_bias = None
self.attn_func = encdec_attn_func
try:
# the fast one requires apex and does not work with incremental so careful
from ..optimized.encdec_attention_func_bias import fast_encdec_attn_func
self.attn_func_fast = fast_encdec_attn_func
self.optimized = 1
except ModuleNotFoundError as e:
self.optimized = 2
self.attn_func_fast = None
self.reset_parameters()
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
def forward(self, query, key, value, src_factor=None, tgt_factor=None, attn_mask=None,
incremental=False, incremental_cache=None):
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
time_masking = False
len_key = key.size(0)
# tgt_factor = self.factor_to_rank(tgt_factor)
# src_factor = self.factor_to_rank(src_factor)
in_proj_weight_q = torch.mv(self.in_proj_weight_q, tgt_factor).view(self.embed_dim, self.embed_dim)
in_proj_weight_kv = torch.mv(self.in_proj_weight_kv, src_factor).view(self.embed_dim * 2, self.embed_dim)
out_proj_weight = torch.mv(self.out_proj_weight, tgt_factor).view(self.embed_dim, self.embed_dim)
if self.optimized == 1 and (self.training and not incremental) and len_key <= 1024 \
and query.is_cuda and in_proj_weight_q.dtype == torch.half:
if attn_mask is not None:
if attn_mask.dim() == 3:
attn_mask = attn_mask.squeeze(1)
attn_mask = attn_mask.byte()
outputs = self.attn_func_fast(time_masking, is_training, self.num_heads,
query, key.type_as(in_proj_weight_q),
in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
attn_mask, self.dropout)
coverage = None
# during evaluation we use the python binding which is safer ....
else:
outputs, coverage, = self.attn_func(time_masking, is_training,
self.num_heads, query, key,
in_proj_weight_q, in_proj_weight_kv,
out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache)
# TODO: add incremental cache
return outputs, coverage
| 4,713
| 41.468468
| 113
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_factorized/multilingual_adapters.py
|
# Implementation of the multilingual adapter as in Bapna et al. (2019)
import torch
from torch.nn import Parameter
import torch.nn.functional as F
import math
from ..optimized.feed_forward import PositionWiseFeedForward
from ..layer_norm import LayerNorm
def xavier_normal(weight, gain=1.0):
fan_in, fan_out = weight.size(-2), weight.size(-1)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
with torch.no_grad():
weight.normal_(0, std)
class MultilingualAdapter(torch.nn.Module):
def __init__(self, model_size, bottleneck_size, n_languages=1, dropout=0.0):
super(MultilingualAdapter, self).__init__()
self.all_modules = torch.nn.ModuleList()
for i in range(n_languages):
layer_norm = LayerNorm(model_size)
feed_forward = PositionWiseFeedForward(model_size, bottleneck_size, dropout=dropout)
adapter = torch.nn.Sequential(layer_norm, feed_forward)
self.all_modules.append(adapter)
def forward(self, input, lang=None):
"""
:param input: TxBxN Tensor
:param lang: [1] Tensor
:return:
"""
assert lang.numel() == 1
index = lang.item()
adapter = self.all_modules[index]
# normalize -> transform -> residual
return input + adapter(input)
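
# Minimal usage sketch (editor's addition; assumes this repo's
# PositionWiseFeedForward builds on CPU with default arguments):
def _adapter_demo():
    adapter = MultilingualAdapter(model_size=8, bottleneck_size=4, n_languages=2)
    x = torch.randn(5, 3, 8)  # T x B x N
    lang = torch.tensor([1])
    out = adapter(x, lang=lang)
    assert out.shape == x.shape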
| 1,342
| 25.333333
| 96
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_factorized/linear.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.cuda.amp import autocast
class MultilingualLinear(torch.nn.Module):
def __init__(self, input_size, output_size, n_factors=1, rank=1,
use_multiplicative=False,
weight_drop=0.0, mfw_activation="none", no_bias=False):
super().__init__()
self.use_multiplicative = use_multiplicative
self.weight_drop = weight_drop
self.no_bias = no_bias
assert (not self.no_bias) or self.use_multiplicative
self.weight = torch.nn.Parameter(torch.Tensor(output_size, input_size))
self.bias = torch.nn.Parameter(torch.Tensor(output_size))
if not self.no_bias:
self.r = torch.nn.Parameter(torch.Tensor(n_factors, rank, output_size))
self.s = torch.nn.Parameter(torch.Tensor(n_factors, rank, input_size))
if use_multiplicative:
self.rm = torch.nn.Parameter(torch.Tensor(n_factors, 1, output_size))
self.sm = torch.nn.Parameter(torch.Tensor(n_factors, 1, input_size))
self.reset_parameters()
self.mfw_activation = mfw_activation.lower()
def reset_parameters(self, init='normal'):
if init == 'normal':
torch.nn.init.xavier_normal_(self.weight)
else:
torch.nn.init.xavier_uniform_(self.weight)
# for batch ensemble we init r_i and s_i with random sign vectors
if self.use_multiplicative:
torch.nn.init.constant_(self.rm, 1.0)
torch.nn.init.constant_(self.sm, 1.0)
if not self.no_bias:
torch.nn.init.normal_(self.r, 0.0, 0.02)
torch.nn.init.normal_(self.s, 0.0, 0.02)
def freeze(self):
if self.use_multiplicative:
self.rm.requires_grad = False
self.sm.requires_grad = False
if not self.no_bias:
self.r.requires_grad = False
self.s.requires_grad = False
def unfreeze(self):
if self.use_multiplicative:
self.rm.requires_grad = True
self.sm.requires_grad = True
if not self.no_bias:
self.r.requires_grad = True
self.s.requires_grad = True
def get_weight(self, indices, factorize=True):
weight_ = self.weight
if indices is None:
return weight_, self.bias
if factorize:
weight_ = F.dropout(self.weight, p=self.weight_drop, training=self.training)
if indices.size(0) == 1 and len(indices.shape) == 1:
if self.use_multiplicative:
rm = torch.index_select(self.rm, 0, indices).squeeze(0)
sm = torch.index_select(self.sm, 0, indices).squeeze(0)
weight_ = weight_ * torch.sum(torch.bmm(rm.unsqueeze(-1), sm.unsqueeze(1)), dim=0)
if self.mfw_activation == "none":
weight_ = weight_
elif self.mfw_activation == "gelu":
weight_ = F.gelu(weight_)
elif self.mfw_activation == "silu":
weight_ = F.silu(weight_)
else:
raise NotImplementedError
if not self.no_bias:
r = torch.index_select(self.r, 0, indices).squeeze(0)
s = torch.index_select(self.s, 0, indices).squeeze(0)
weight_mask = torch.bmm(r.unsqueeze(-1), s.unsqueeze(1))
weight_mask = torch.sum(weight_mask, dim=0)
weight_ = weight_ + weight_mask
return weight_, self.bias
    def forward(self, input, indices=None, factorize=True):
        """
        :param factorize:
        :param input: T x B x H
        :param indices: T x B or B
        :return:
        """
        if indices.size(0) == 1 and len(indices.shape) == 1:
            weight_, bias = self.get_weight(indices, factorize=factorize)
            input = F.linear(input, weight_, bias)
            return input
        else:
            # only a single shared language index per minibatch is supported
            raise NotImplementedError("indices size %s with input size %s is not supported"
                                      % (str(indices.size()), str(input.size())))
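
# Editor's sketch of the low-rank update built in get_weight above: the
# per-language mask is the sum over the rank dimension of outer products r s^T.
def _low_rank_mask_demo():
    rank, out_size, in_size = 2, 3, 4
    r = torch.randn(rank, out_size)
    s = torch.randn(rank, in_size)
    mask = torch.bmm(r.unsqueeze(-1), s.unsqueeze(1)).sum(dim=0)
    ref = sum(torch.outer(r[i], s[i]) for i in range(rank))
    assert torch.allclose(mask, ref)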
# Multilingual Factorized Weight
class MFWPositionWiseFeedForward(torch.nn.Module):
"""
Position Wise Feedforward model with factorized weights
"""
def __init__(self, model_size, inner_size, dropout=0., variational=False, activation='relu',
n_languages=1, rank=1, use_multiplicative=False, weight_drop=0.0, mfw_activation='none',
glu=False, no_bias=False):
super().__init__()
self.variational = variational
self.dropout = dropout
self.activation = activation
self.n_languages = n_languages
self.weight_drop = weight_drop
self.glu = glu
self.dropout_residual = False
self.fused = False
self.input_linear = MultilingualLinear(model_size, inner_size * (2 if glu else 1), n_languages,
rank, use_multiplicative, weight_drop, mfw_activation=mfw_activation,
no_bias=no_bias)
self.output_linear = MultilingualLinear(inner_size, model_size, n_languages,
rank, use_multiplicative, weight_drop, mfw_activation=mfw_activation,
no_bias=no_bias)
if self.activation == 'relu':
self.act = nn.ReLU(inplace=True)
elif self.activation == 'gelu':
self.act = nn.GELU()
elif self.activation in ['silu', 'swish']:
self.act = nn.SiLU(inplace=True)
if self.variational:
from onmt.modules.dropout import variational_dropout
self.dropout_function = variational_dropout
else:
self.dropout_function = F.dropout
# At the moment fused mlp is supported for RELU, SiLU, Swish, GELU and AGELU (approximated GELU)
if not self.glu and \
self.activation in ['relu', 'silu', 'swish', 'gelu', 'agelu'] and not self.variational:
if self.activation == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation in ['silu', 'swish']:
from onmt.modules.mlp.mlp import mlp_silu_function
if mlp_silu_function is not None:
self.fused_function = mlp_silu_function
self.fused = True
elif self.activation == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
elif self.activation == 'agelu':
from onmt.modules.mlp.mlp import mlp_agelu_function
if mlp_agelu_function is not None:
self.fused_function = mlp_agelu_function
self.fused = True
def freeze(self):
self.input_linear.freeze()
self.output_linear.freeze()
def unfreeze(self):
self.input_linear.unfreeze()
self.output_linear.unfreeze()
def forward(self, hidden, indices=None, factorize=True, **kwargs):
"""
:param factorize:
:param hidden: tensor [T x B x H]
:param indices: tensor [1]
:return:
"""
if self.fused and hidden.is_cuda:
in_weight, in_bias = self.input_linear.get_weight(indices, factorize=factorize)
out_weight, out_bias = self.output_linear.get_weight(indices, factorize=factorize)
with autocast(enabled=False):
input = hidden
weights = [in_weight.half(), out_weight.half()]
biases = [in_bias.half(), out_bias.half()]
seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
recompute = False
dropout = self.dropout if self.training else 0.0
hidden = self.fused_function(dropout, recompute, input.half().view(seq_len * bsz, -1),
*weights, *biases).type_as(input)
hidden = hidden.view(seq_len, bsz, hidden_size)
return hidden
else:
hidden = self.input_linear(hidden, indices)
if self.glu:
hidden, gate = hidden.chunk(2, dim=-1)
hidden = self.act(hidden) * gate
else:
hidden = self.act(hidden)
hidden = self.dropout_function(hidden, p=self.dropout, training=self.training)
hidden = self.output_linear(hidden, indices)
return hidden
def reset_parameters(self, init='normal'):
self.input_linear.reset_parameters(init)
self.output_linear.reset_parameters(init)
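
# Editor's sketch of the GLU branch in forward above: the input projection
# emits 2 * inner_size features, split into a value half and a gate half
# (hypothetical sizes).
def _glu_demo():
    hidden = torch.randn(5, 2, 8)  # 2 * inner_size, with inner_size = 4
    value, gate = hidden.chunk(2, dim=-1)
    out = F.relu(value) * gate
    assert out.shape == (5, 2, 4)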
| 9,046
| 35.926531
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_factorized/relative_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.relative_self_attention_func import relative_self_attn_func
class MFWRelativeSelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., learnable_pos=False, max_pos=0,
n_languages=1, rank=1, use_multiplicative=False, weight_drop=0.0,
mfw_activation="none", no_bias=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = True
self.use_multiplicative = use_multiplicative
self.weight_drop = weight_drop
self.no_bias = no_bias
self.learnable_pos = learnable_pos
assert (not self.no_bias) or self.use_multiplicative
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
if self.learnable_pos:
# If using learnable position embeddings, then assign embeddings for 2N + 1 max positions
# (embeddings are shared across heads)
assert max_pos >= 1
self.pos_emb = nn.Embedding(2 * max_pos + 1, self.head_dim)
self.pos_proj_weight, self.pos_proj_bias = None, None
else:
self.pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.pos_proj_bias = Parameter(torch.Tensor(embed_dim))
if not self.no_bias:
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, 3 * embed_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if not self.learnable_pos:
self.r_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if use_multiplicative:
rank = 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, 3 * embed_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if not self.learnable_pos:
self.rm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.sm_p = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
self.reset_parameters()
self.attn_func = relative_self_attn_func
self.mfw_activation = mfw_activation.lower()
def reset_parameters(self, init='normal'):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
if not self.learnable_pos:
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
else:
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
if not self.learnable_pos:
nn.init.uniform_(self.pos_proj_weight, -std_, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
if not self.learnable_pos:
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
if not self.no_bias:
nn.init.normal_(self.r_i, 0.0, 0.02)
nn.init.normal_(self.s_i, 0.0, 0.02)
if not self.learnable_pos:
nn.init.normal_(self.r_p, 0.0, 0.02)
nn.init.normal_(self.s_p, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
if self.use_multiplicative:
nn.init.constant_(self.rm_i, 1.0)
nn.init.constant_(self.sm_i, 1.0)
if not self.learnable_pos:
nn.init.constant_(self.rm_p, 1.0)
nn.init.constant_(self.sm_p, 1.0)
nn.init.constant_(self.rm_o, 1.0)
nn.init.constant_(self.sm_o, 1.0)
def freeze(self):
if not self.no_bias:
self.r_i.requires_grad = False
self.s_i.requires_grad = False
if not self.learnable_pos:
self.r_p.requires_grad = False
self.s_p.requires_grad = False
self.r_o.requires_grad = False
self.s_o.requires_grad = False
if self.use_multiplicative:
self.rm_i.requires_grad = False
self.sm_i.requires_grad = False
if not self.learnable_pos:
self.rm_p.requires_grad = False
self.sm_p.requires_grad = False
self.rm_o.requires_grad = False
self.sm_o.requires_grad = False
def unfreeze(self):
if not self.no_bias:
self.r_i.requires_grad = True
self.s_i.requires_grad = True
if not self.learnable_pos:
self.r_p.requires_grad = True
self.s_p.requires_grad = True
self.r_o.requires_grad = True
self.s_o.requires_grad = True
if self.use_multiplicative:
self.rm_i.requires_grad = True
self.sm_i.requires_grad = True
if not self.learnable_pos:
self.rm_p.requires_grad = True
self.sm_p.requires_grad = True
self.rm_o.requires_grad = True
self.sm_o.requires_grad = True
def forward(self, input, pos, indices=None, key_padding_mask=None, attn_mask=None,
incremental=False, incremental_cache=None, recompute=False, factorize=True, **kwargs):
in_proj_weight = self.in_proj_weight
out_proj_weight = self.out_proj_weight
pos_proj_weight = self.pos_proj_weight
# option to disable factorization
if factorize:
# weight dropout
in_proj_weight = F.dropout(self.in_proj_weight, p=self.weight_drop, training=self.training)
out_proj_weight = F.dropout(self.out_proj_weight, p=self.weight_drop, training=self.training)
if not self.learnable_pos:
pos_proj_weight = F.dropout(self.pos_proj_weight, p=self.weight_drop, training=self.training)
else:
pos_proj_weight = None
if self.use_multiplicative:
rm_i = torch.index_select(self.rm_i, 0, indices).squeeze(0)
sm_i = torch.index_select(self.sm_i, 0, indices).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, indices).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, indices).squeeze(0)
if not self.learnable_pos:
rm_p = torch.index_select(self.rm_p, 0, indices).squeeze(0)
sm_p = torch.index_select(self.sm_p, 0, indices).squeeze(0)
in_scale = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
in_proj_weight = in_proj_weight * in_scale
out_proj_weight = out_proj_weight * torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
if not self.learnable_pos:
pos_proj_weight = pos_proj_weight * torch.bmm(rm_p.unsqueeze(-1), sm_p.unsqueeze(1)).sum(dim=0)
if not self.no_bias:
if indices.size(0) == 1 and len(indices.shape) == 1:
r_i = torch.index_select(self.r_i, 0, indices).squeeze(0)
s_i = torch.index_select(self.s_i, 0, indices).squeeze(0)
if not self.learnable_pos:
r_p = torch.index_select(self.r_p, 0, indices).squeeze(0)
s_p = torch.index_select(self.s_p, 0, indices).squeeze(0)
r_o = torch.index_select(self.r_o, 0, indices).squeeze(0)
s_o = torch.index_select(self.s_o, 0, indices).squeeze(0)
else:
print(indices.size(), input.size())
raise NotImplementedError
in_proj_weight = in_proj_weight + torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
if not self.learnable_pos:
pos_proj_weight = pos_proj_weight + torch.bmm(r_p.unsqueeze(-1), s_p.unsqueeze(1)).sum(dim=0)
out_proj_weight = out_proj_weight + torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
if self.mfw_activation == "none":
in_proj_weight = in_proj_weight
elif self.mfw_activation == "gelu":
in_proj_weight = F.gelu(in_proj_weight)
pos_proj_weight = F.gelu(pos_proj_weight) if not self.learnable_pos else None
out_proj_weight = F.gelu(out_proj_weight)
elif self.mfw_activation == "silu":
in_proj_weight = F.silu(in_proj_weight)
pos_proj_weight = F.silu(pos_proj_weight) if not self.learnable_pos else None
out_proj_weight = F.silu(out_proj_weight)
else:
raise NotImplementedError
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
is_training = self.training
if self.learnable_pos:
# [len_q x len_k] -> [len_q x len_k x head_dim]
pos = self.pos_emb(pos)
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
self.in_proj_bias, self.out_proj_bias, self.pos_proj_bias,
self.r_w_bias, self.r_r_bias,
mask, self.dropout,
incremental, incremental_cache, False,
self.learnable_pos, True, recompute)
# last False is double precision
return outputs, coverage
if __name__ == "__main__":
bsz = 4
seq_len_q = 4
seq_len_kv = 7
embed_dim = 32
n_heads = 4
output_size = 32
n_languages = 7
class TestNetwork(nn.Module):
def __init__(self):
super(TestNetwork, self).__init__()
self.func = relative_self_attn_func
self.n_heads = n_heads
def forward(self, q, r, input_weights, output_weights, pos_weights,
input_biases, output_biases, pos_biases,
r_i, s_i, r_o, s_o, r_p, s_p,
r_w_bias, r_r_bias):
use_time_mask = False
mask = None
is_training = True
incremental = False
incremental_cache = None
double_precision = True
dropout_prob = 0.0
heads = self.n_heads
output, coverage = self.func(q, r, use_time_mask, is_training, heads,
input_weights, output_weights, pos_weights,
input_biases, output_biases, pos_biases,
r_i, s_i, r_o, s_o, r_p, s_p,
r_w_bias, r_r_bias,
mask, dropout_prob,
incremental, incremental_cache, double_precision)
return output
r_w_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim//n_heads)).double().cuda()
r_r_bias = nn.Parameter(torch.Tensor(n_heads, embed_dim//n_heads)).double().cuda()
in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)).double().cuda()
pos_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)).double().cuda()
in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)).double().cuda()
pos_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
out_proj_bias = Parameter(torch.Tensor(embed_dim)).double().cuda()
r_i = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_i = torch.nn.Parameter(torch.Tensor(bsz, 3 * embed_dim)).double().cuda()
r_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_p = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
r_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
s_o = torch.nn.Parameter(torch.Tensor(bsz, embed_dim)).double().cuda()
std_ = math.sqrt(2.0 / (embed_dim + embed_dim))
nn.init.normal_(in_proj_weight, 0.0, std_)
nn.init.normal_(pos_proj_weight, 0.0, std_)
nn.init.normal_(out_proj_weight, 0.0, std_)
nn.init.normal_(r_w_bias, 0.0, std_)
nn.init.normal_(r_r_bias, 0.0, std_)
torch.nn.init.constant_(in_proj_bias, 0.0)
torch.nn.init.constant_(out_proj_bias, 0.0)
torch.nn.init.constant_(pos_proj_bias, 0.0)
with torch.no_grad():
r_i.bernoulli_(0.5).mul_(-2).add_(1)
s_i.bernoulli_(0.5).mul_(-2).add_(1)
r_p.bernoulli_(0.5).mul_(-2).add_(1)
s_p.bernoulli_(0.5).mul_(-2).add_(1)
r_o.bernoulli_(0.5).mul_(-2).add_(1)
s_o.bernoulli_(0.5).mul_(-2).add_(1)
model = TestNetwork()
q = torch.randn((seq_len_q, bsz, embed_dim), requires_grad=True)
r = torch.randn((seq_len_kv, bsz, embed_dim), requires_grad=False)
model = model.double().cuda()
q = q.double().cuda()
r = r.double().cuda()
print("Gradchecking ...")
torch.autograd.gradcheck(model, (q, r, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_i, s_i, r_o, s_o, r_p, s_p,
r_w_bias, r_r_bias))
print("Gradcheck successful!!!")
| 15,431
| 43.472622
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_factorized/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/multilingual_factorized/encdec_attention.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import math
from ..optimized.encdec_attention_func import encdec_attn_func
class MFWEncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0.,
n_languages=1, rank=1,
use_multiplicative=False, no_bias=False,
weight_drop=0.0, mfw_activation="none"):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
self.use_multiplicative = use_multiplicative
self.weight_drop = weight_drop
self.no_bias = no_bias
assert (not self.no_bias) or self.use_multiplicative
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
if not self.no_bias:
self.r_q = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_q = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_kv = torch.nn.Parameter(torch.Tensor(n_languages, rank, 2 * embed_dim))
self.s_kv = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, embed_dim))
if use_multiplicative:
self.ml_rank = 1
self.rm_q = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, embed_dim))
self.sm_q = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, embed_dim))
self.rm_kv = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, 2 * embed_dim))
self.sm_kv = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, self.ml_rank, embed_dim))
self.in_proj_bias_q = None
self.in_proj_bias_kv = None
self.out_proj_bias = None
self.attn_func = encdec_attn_func
self.mfw_activation = mfw_activation.lower()
self.reset_parameters()
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
if not self.no_bias:
nn.init.normal_(self.r_q, 0.0, 0.02)
nn.init.normal_(self.s_q, 0.0, 0.02)
nn.init.normal_(self.r_kv, 0.0, 0.02)
nn.init.normal_(self.s_kv, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
if self.use_multiplicative:
nn.init.constant_(self.rm_q, 1.0)
nn.init.constant_(self.sm_q, 1.0)
nn.init.constant_(self.rm_kv, 1.0)
nn.init.constant_(self.sm_kv, 1.0)
nn.init.constant_(self.rm_o, 1.0)
nn.init.constant_(self.sm_o, 1.0)
def freeze(self):
if not self.no_bias:
self.r_q.requires_grad = False
self.s_q.requires_grad = False
self.r_kv.requires_grad = False
self.s_kv.requires_grad = False
self.r_o.requires_grad = False
self.s_o.requires_grad = False
if self.use_multiplicative:
self.rm_q.requires_grad = False
self.sm_q.requires_grad = False
self.rm_kv.requires_grad = False
self.sm_kv.requires_grad = False
self.rm_o.requires_grad = False
self.sm_o.requires_grad = False
def unfreeze(self):
if not self.no_bias:
self.r_q.requires_grad = True
self.s_q.requires_grad = True
self.r_kv.requires_grad = True
self.s_kv.requires_grad = True
self.r_o.requires_grad = True
self.s_o.requires_grad = True
if self.use_multiplicative:
self.rm_q.requires_grad = True
self.sm_q.requires_grad = True
self.rm_kv.requires_grad = True
self.sm_kv.requires_grad = True
self.rm_o.requires_grad = True
self.sm_o.requires_grad = True
def forward(self, query, key, value, src_indices=None, tgt_indices=None, attn_mask=None,
incremental=False, incremental_cache=None, factorize=True, **kwargs):
indices = tgt_indices
bsz = query.size(1)
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
time_masking = False
recompute = False
# dropping the main weights during training
in_proj_weight_q = self.in_proj_weight_q
in_proj_weight_kv = self.in_proj_weight_kv
out_proj_weight = self.out_proj_weight
if factorize:
in_proj_weight_q = F.dropout(self.in_proj_weight_q, p=self.weight_drop, training=self.training)
in_proj_weight_kv = F.dropout(self.in_proj_weight_kv, p=self.weight_drop, training=self.training)
out_proj_weight = F.dropout(self.out_proj_weight, p=self.weight_drop, training=self.training)
if self.use_multiplicative:
# multiply main weights with extra weights
rm_q = torch.index_select(self.rm_q, 0, indices).squeeze(0)
sm_q = torch.index_select(self.sm_q, 0, src_indices).squeeze(0)
rm_kv = torch.index_select(self.rm_kv, 0, indices).squeeze(0)
sm_kv = torch.index_select(self.sm_kv, 0, src_indices).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, indices).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, src_indices).squeeze(0)
in_proj_weight_q = in_proj_weight_q * torch.bmm(rm_q.unsqueeze(-1), sm_q.unsqueeze(1)).sum(dim=0)
in_proj_weight_kv = in_proj_weight_kv * torch.bmm(rm_kv.unsqueeze(-1), sm_kv.unsqueeze(1)).sum(dim=0)
out_proj_weight = out_proj_weight * torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
# adding main weights with extra weights
# sum(dim=0) sums over the rank dimension
if not self.no_bias:
if indices.size(0) == 1 and len(indices.shape) == 1:
r_q = torch.index_select(self.r_q, 0, indices).squeeze(0)
s_q = torch.index_select(self.s_q, 0, src_indices).squeeze(0)
r_kv = torch.index_select(self.r_kv, 0, indices).squeeze(0)
s_kv = torch.index_select(self.s_kv, 0, src_indices).squeeze(0)
r_o = torch.index_select(self.r_o, 0, indices).squeeze(0)
s_o = torch.index_select(self.s_o, 0, src_indices).squeeze(0)
            else:
                # only a single shared language index per minibatch is supported
                raise NotImplementedError("indices size %s with query size %s is not supported"
                                          % (str(indices.size()), str(query.size())))
in_proj_weight_q = in_proj_weight_q + torch.bmm(r_q.unsqueeze(-1), s_q.unsqueeze(1)).sum(dim=0)
in_proj_weight_kv = in_proj_weight_kv + torch.bmm(r_kv.unsqueeze(-1), s_kv.unsqueeze(1)).sum(dim=0)
out_proj_weight = out_proj_weight + torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
if self.mfw_activation == "none":
in_proj_weight_q = in_proj_weight_q
elif self.mfw_activation == "gelu":
in_proj_weight_q = F.gelu(in_proj_weight_q)
in_proj_weight_kv = F.gelu(in_proj_weight_kv)
out_proj_weight = F.gelu(out_proj_weight)
elif self.mfw_activation == "silu":
in_proj_weight_q = F.silu(in_proj_weight_q)
in_proj_weight_kv = F.silu(in_proj_weight_kv)
out_proj_weight = F.silu(out_proj_weight)
else:
raise NotImplementedError
outputs, coverage, = self.attn_func(recompute, is_training,
self.num_heads, query, key,
in_proj_weight_q, in_proj_weight_kv,
out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache,
False, None, None,
False, True)
# TODO: add incremental cache
return outputs, coverage
| 9,527
| 45.028986
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/adaptive/feed_forward.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout
class AdaptiveFeedForward(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, model_size, inner_size, factor_size, dropout=0., variational=False, activation='relu',
adaptive_type='shared'):
super().__init__()
self.model_size = model_size
self.inner_size = inner_size
self.factor_size = factor_size
self.dropout = dropout
self.bias = True
self.variational = variational
self.activation = activation
self.adaptive_type = adaptive_type
self.factor_map = nn.Linear(self.model_size, self.factor_size)
assert self.activation == 'relu'
self.in_proj_weight = Parameter(torch.Tensor(inner_size, model_size, factor_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, inner_size, factor_size))
self.in_proj_bias = Parameter(torch.Tensor(inner_size, factor_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size, factor_size))
self.reset_parameters()
try:
from apex.mlp.mlp import mlp_function
self.optimized = 1
self.fast_mlp_func = mlp_function
except ModuleNotFoundError as e:
self.optimized = 2
def reset_parameters(self):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
std_ = math.sqrt(2.0 / (self.model_size + self.inner_size))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
def forward(self, input, factor):
factor = self.factor_map(factor).squeeze()
in_proj_weight = torch.mv(self.in_proj_weight.view(-1, self.factor_size), factor)\
.view(self.in_proj_weight.size(0), self.in_proj_weight.size(1))
out_proj_weight = torch.mv(self.out_proj_weight.view(-1, self.factor_size), factor)\
.view(self.out_proj_weight.size(0), self.out_proj_weight.size(1))
in_proj_bias = torch.mv(self.in_proj_bias, factor)
out_proj_bias = torch.mv(self.out_proj_bias, factor)
if self.optimized == 2 or not input.is_cuda:
hidden = F.linear(input, in_proj_weight, in_proj_bias)
hidden = torch.relu(hidden)
if self.variational:
hidden = variational_dropout(hidden, p=self.dropout, training=self.training)
else:
hidden = F.dropout(hidden, p=self.dropout, training=self.training)
hidden = F.linear(hidden, out_proj_weight, out_proj_bias)
else:
# Here weight dropout has to be done instead of dropout because
# Apex MLP does not support dropout
weights = [F.dropout(in_proj_weight, p=self.dropout, training=self.training),
F.dropout(out_proj_weight, p=self.dropout, training=self.training)]
biases = [F.dropout(in_proj_bias, p=self.dropout, training=self.training),
F.dropout(out_proj_bias, p=self.dropout, training=self.training)]
seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
hidden = self.fast_mlp_func(True, 1, input.view(seq_len*bsz, -1), *weights, *biases)
hidden = hidden.view(seq_len, bsz, hidden_size)
return hidden
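
# Editor's sketch of the weight generation used in forward above: torch.mv
# over the flattened 3-D parameter equals a factor-weighted sum of base
# matrices (hypothetical sizes).
def _factorized_weight_demo():
    inner, model, fac = 6, 4, 3
    w = torch.randn(inner, model, fac)
    factor = torch.randn(fac)
    w_eff = torch.mv(w.view(-1, fac), factor).view(inner, model)
    ref = (w * factor).sum(dim=-1)
    assert torch.allclose(w_eff, ref)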
| 3,676
| 40.784091
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/adaptive/__init__.py
| 0
| 0
| 0
|
py
|
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/adaptive/relative_self_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized.relative_self_attention_func import relative_self_attn_func
if hasattr(torch._C, '_jit_set_profiling_executor'):
torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode'):
torch._C._jit_set_profiling_mode(False)
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
    # type: (Tensor, Tensor, float, bool) -> Tensor
    # NOTE: dropout is applied unconditionally here; callers are expected to
    # take this path only while training
    out = F.dropout(x, p=prob, training=True)
    out = residual + out
    return out
class AdaptiveRelativeAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, model_size, num_heads, factor_size, dropout=0., adaptive_type='shared'):
super().__init__()
self.model_size = model_size
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = model_size // num_heads
self.factor_size = factor_size
self.adaptive_type = adaptive_type
assert self.head_dim * num_heads == self.model_size, "model_size must be divisible by num_heads"
self.bias = True
self.in_proj_weight = Parameter(torch.Tensor(3 * model_size, model_size, factor_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size, factor_size))
self.pos_proj_weight = Parameter(torch.Tensor(model_size, model_size, factor_size))
self.in_proj_bias = Parameter(torch.Tensor(3*model_size, factor_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size, factor_size))
self.pos_proj_bias = Parameter(torch.Tensor(model_size, factor_size))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim, factor_size))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim, factor_size))
self.factor_map = nn.Linear(self.model_size, self.factor_size)
self.reset_parameters()
self.attn_func = relative_self_attn_func
def reset_parameters(self):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
std_ = math.sqrt(2.0 / (self.model_size + self.model_size))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, std_)
nn.init.normal_(self.r_r_bias, 0.0, std_)
def forward(self, input, pos, factor, key_padding_mask=None, attn_mask=None, mems=None,
incremental=False, incremental_cache=None):
factor = self.factor_map(factor).squeeze()
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(0).transpose(0, 1)
elif attn_mask is not None:
mask = attn_mask
if len(mask.shape) == 3:
mask = mask.squeeze(-1)
else:
mask = None
in_proj_weight = torch.mv(self.in_proj_weight.view(-1, self.factor_size), factor) \
.view(self.in_proj_weight.size(0), self.in_proj_weight.size(1))
out_proj_weight = torch.mv(self.out_proj_weight.view(-1, self.factor_size), factor) \
.view(self.out_proj_weight.size(0), self.out_proj_weight.size(1))
        pos_proj_weight = torch.mv(self.pos_proj_weight.view(-1, self.factor_size), factor) \
            .view(self.pos_proj_weight.size(0), self.pos_proj_weight.size(1))
in_proj_bias = torch.mv(self.in_proj_bias, factor)
out_proj_bias = torch.mv(self.out_proj_bias, factor)
pos_proj_bias = torch.mv(self.pos_proj_bias, factor)
r_w_bias = torch.mv(self.r_w_bias.view(-1, self.factor_size), factor) \
.view(self.r_w_bias.size(0), self.r_w_bias.size(1))
r_r_bias = torch.mv(self.r_r_bias.view(-1, self.factor_size), factor) \
.view(self.r_r_bias.size(0), self.r_r_bias.size(1))
is_training = self.training
outputs, coverage = self.attn_func(input, pos, attn_mask is not None, is_training, self.num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, self.dropout,
incremental, incremental_cache, False, False)
# last False is double precision
return outputs, coverage
| 4,993
| 42.051724
| 106
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/adaptive/encdec_attention.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from ..optimized.encdec_attention_func import encdec_attn_func
if hasattr(torch._C, '_jit_set_profiling_executor'):
torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode'):
torch._C._jit_set_profiling_mode(False)
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0., n_ensemble=1):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
self.n_ensemble = n_ensemble
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
        self.register_parameter('in_proj_bias_q', None)
        self.register_parameter('in_proj_bias_kv', None)
        self.out_proj_bias = None
        # batch_ensemble weights
        self.be_r = Parameter(torch.Tensor(embed_dim))
        # note: be_s was originally created as an empty tensor; it is given the same
        # shape as be_r here (presumably the intent) so that the normal_ init in
        # reset_parameters actually fills it
        self.be_s = Parameter(torch.Tensor(embed_dim))
self.attn_func = encdec_attn_func
self.reset_parameters()
        try:
            # the fast implementation requires apex and does not support incremental decoding
            from apex.contrib.multihead_attn.fast_encdec_multihead_attn_func import fast_encdec_attn_func
            self.attn_func_fast = fast_encdec_attn_func
            # optimized is deliberately left at 2 here, so the fast kernel branch in
            # forward() (which checks optimized == 1) stays disabled even with apex
            self.optimized = 2
        except ModuleNotFoundError:
            # apex is not available: fall back to the python implementation
            self.optimized = 2
            self.attn_func_fast = None
def reset_parameters(self):
# nn.init.xavier_uniform_(self.in_proj_weight_q)
# in_proj_weight_kv has shape [2 * hidden, hidden] but it should be
# initialized like a [hidden, hidden] matrix.
# sqrt(6 / (hidden + hidden)) / sqrt(6 / (2 * hidden + hidden)) = sqrt(1.5)
# therefore xavier_uniform gain should be set to sqrt(1.5).
# nn.init.xavier_uniform_(self.in_proj_weight_kv, gain=math.sqrt(1.5))
# nn.init.xavier_uniform_(self.out_proj_weight)
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.normal_(self.be_r, 0.0, std_)
nn.init.normal_(self.be_s, 0.0, std_)
def forward(self, query, key, value, attn_mask=None, incremental=False, incremental_cache=None):
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
time_masking = False
len_key = key.size(0)
if self.optimized == 1 and (self.training and not incremental) and len_key <= 1024 and query.is_cuda:
if attn_mask is not None:
if attn_mask.dim() == 3:
attn_mask = attn_mask.squeeze(1)
attn_mask = attn_mask.byte()
outputs = self.attn_func_fast(time_masking, is_training, self.num_heads, query, key,
self.in_proj_weight_q, self.in_proj_weight_kv, self.out_proj_weight,
attn_mask, self.dropout)
coverage = None
# during evaluation we use the python binding which is safer ....
else:
outputs, coverage, = self.attn_func(time_masking, is_training,
self.num_heads, query, key,
self.in_proj_weight_q, self.in_proj_weight_kv,
self.out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache)
# TODO: add incremental cache
return outputs, coverage
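
# Hedged usage sketch (added; shapes are assumptions, not original code): inputs
# are time-first [len, bsz, embed_dim] and key must be the same tensor as value:
#
#   attn = EncdecMultiheadAttn(num_heads=8, embed_dim=512)
#   q = torch.randn(10, 4, 512)   # [len_q, bsz, embed_dim]
#   kv = torch.randn(20, 4, 512)  # [len_k, bsz, embed_dim]
#   out, coverage = attn(q, kv, kv)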
| 4,479
| 40.481481
| 110
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/test_self_attention_bias_func.py
|
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from self_attention_func import self_attn_func
from self_attention_attnbias_func import self_attn_bias_func
class Parameters(torch.nn.Module):
    def __init__(self, model_size=16, heads=1):
        super(Parameters, self).__init__()
        self.model_size = model_size
        self.heads = heads
        self.head_dim = model_size // heads
self.in_proj_weight = Parameter(torch.Tensor(3 * model_size, model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size))
self.in_proj_bias = Parameter(torch.Tensor(3 * model_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=23272123):
torch.cuda.set_device(0)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
self.seq_length = 512
self.sequences = 64
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
embed_dim = self.hidden_dim
self.ref_parameters = Parameters(model_size=self.hidden_dim, heads=self.heads)
self.ref_parameters = self.ref_parameters.cuda().half()
self.tst_parameters = deepcopy(self.ref_parameters)
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
# Reset seed so parameters are identical
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
self.tst_inputs.data.copy_(self.ref_inputs.data)
self.attnbias = torch.randn(self.sequences * self.heads, self.seq_length, self.seq_length,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
def test_input(self):
print("Checking if all inputs are the same ...")
self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight, self.tst_parameters.in_proj_weight,
atol=1e-5, rtol=1e-5))
print("Done.")
def test_output(self):
print("Testing self-attention with random mask ....")
training = True
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
with torch.no_grad():
self.tst_inputs.copy_(self.ref_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
ref_output, ref_coverage = self_attn_bias_func(False, training, self.heads, self.ref_inputs, self.attnbias,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None, False, None,
False, True, False)
tst_output, tst_coverage = self_attn_bias_func(False, training, self.heads, self.tst_inputs, self.attnbias,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None, False, None,
True, True, False)
print(ref_output - tst_output)
self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-1, rtol=1e-1))
grad_outputs_ref = torch.randn_like(tst_output)
grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
tst_output.data.copy_(ref_output.data)
ref_output.backward(grad_outputs_ref)
tst_output.backward(grad_outputs_tst)
# self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# self.tst_parameters.out_proj_weight.grad,
# atol=1e-3, rtol=1e-3))
np.testing.assert_allclose(
self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
atol=1e-1, rtol=1e-1)
np.testing.assert_allclose(
self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
atol=1e-1, rtol=1e-1)
print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
#
# self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# self.tst_parameters.in_proj_weight.grad,
# atol=1e-2, rtol=1e-2))
np.testing.assert_allclose(
self.ref_parameters.in_proj_weight.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_weight.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_parameters.in_proj_bias.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_bias.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
# self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# atol=1e-3, rtol=1e-3))
#
np.testing.assert_allclose(
self.ref_inputs.grad.detach().cpu().numpy(),
self.tst_inputs.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
# def test_output_autoregressive(self):
#
# print("Testing self-attention with time mask ....")
# training = True
#
# self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
# dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
#
# with torch.no_grad():
# self.tst_inputs.copy_(self.ref_inputs)
#
# # mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
# mask = torch.triu(
# self.ref_inputs.new_ones(self.seq_length, self.seq_length), diagonal=1).bool()
#
# ref_output, ref_coverage = self_attn_func(True, training, self.heads, self.ref_inputs,
# self.ref_parameters.in_proj_weight,
# self.ref_parameters.out_proj_weight,
# self.ref_parameters.in_proj_bias,
# self.ref_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# False, True)
#
# tst_output, tst_coverage = self_attn_func(True, training, self.heads, self.tst_inputs,
# self.tst_parameters.in_proj_weight,
# self.tst_parameters.out_proj_weight,
# self.tst_parameters.in_proj_bias,
# self.tst_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# True, True)
#
# self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-2, rtol=1e-2))
# grad_outputs_ref = torch.randn_like(tst_output)
#
# grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
#
# tst_output.data.copy_(ref_output.data)
# ref_output.backward(grad_outputs_ref)
# tst_output.backward(grad_outputs_tst)
#
# self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# self.tst_parameters.out_proj_weight.grad,
# atol=1e-1, rtol=1e-1))
#
# print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
#
# # self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# # self.tst_parameters.in_proj_weight.grad,
# # atol=1e-2, rtol=1e-2))
# #
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_weight.grad.data.cpu().numpy(),
# self.tst_parameters.in_proj_weight.grad.data.cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# # self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# # atol=1e-3, rtol=1e-3))
# #
# np.testing.assert_allclose(
# self.ref_inputs.detach().cpu().numpy(),
# self.tst_inputs.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
def test_performance(self):
training = True
for dropout in [0.0, 0.5]:
mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
num_iters = 32
            # warm-up iterations (not timed)
            for _ in range(num_iters):
ref_output, ref_coverage = self_attn_bias_func(False, training, self.heads, self.ref_inputs, self.attnbias,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
False, True, False)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
tst_output, tst_coverage = self_attn_bias_func(False, training, self.heads, self.tst_inputs, self.attnbias,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
True, True, False)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output, ref_coverage = self_attn_bias_func(False, training, self.heads, self.ref_inputs, self.attnbias,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
False, True, False)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
tst_output, tst_coverage = self_attn_bias_func(False, training, self.heads, self.tst_inputs, self.attnbias,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
True, True, False)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nCUDA Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()

# File: NMTGMinor-master/onmt/modules/optimized/attention_softmax.py

import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import mask_softmax_dropout_cuda
except (ModuleNotFoundError, ImportError) as e:
mask_softmax_dropout_cuda = None
class AttentionSoftmaxDropout(object):
@staticmethod
def forward(inputs, double_precision, dropout_prob, is_training, heads):
"""
:param heads:
:param is_training:
:param dropout_prob:
:param inputs:
:param double_precision:
:return:
"""
len_k = inputs.size(-1)
if mask_softmax_dropout_cuda and len_k <= 2048 and inputs.type() == 'torch.cuda.HalfTensor':
dropout_mask, softmax_results, dropout_results = \
mask_softmax_dropout_cuda.forward(is_training, heads, inputs, dropout_prob)
if is_training:
dropout_results = softmax_results
else:
dtype_ = torch.float64 if double_precision else torch.float32
softmax_results = F.softmax(inputs, dim=-1, dtype=dtype_)
# Dropout - is not executed for inference
if is_training:
                dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob))
else:
dropout_results = softmax_results
dropout_mask = torch.tensor([])
return dropout_mask, softmax_results, dropout_results
@staticmethod
def backward(grad_outputs, softmax_results, dropout_prob_t, heads_t, dropout_mask):
len_key = softmax_results.size(-1)
if mask_softmax_dropout_cuda is not None and grad_outputs.type() == 'torch.cuda.HalfTensor' \
and len_key <= 2048:
softmax_grads = mask_softmax_dropout_cuda.backward_recompute(heads_t[0], grad_outputs, softmax_results,
dropout_mask, dropout_prob_t[0])
else:
            dropout_grads = torch._masked_scale(grad_outputs, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
            # be careful: we overwrite into the "softmax_results" memory here;
            # newer torch versions take a dtype as the last argument
            try:
                softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1,
                                                             softmax_results.dtype)
            except TypeError:
                softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
return softmax_grads
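
# Hedged usage sketch (added; shapes and dtypes are assumptions): forward takes raw
# attention scores and returns (dropout_mask, softmax_results, dropout_results);
# backward expects 1-element tensors for dropout_prob_t and heads_t:
#
#   scores = torch.randn(4 * 8, 32, 32, device="cuda", dtype=torch.half)
#   mask, softmax_res, dropout_res = AttentionSoftmaxDropout.forward(
#       scores, False, 0.1, True, 8)
#   grads = AttentionSoftmaxDropout.backward(
#       torch.randn_like(dropout_res), softmax_res,
#       torch.tensor([0.1]), torch.tensor([8]), mask)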

# File: NMTGMinor-master/onmt/modules/optimized/dropout_add.py

import torch
import unittest
import numpy as np
from time import time
from torch.cuda.amp import custom_fwd, custom_bwd
try:
import fused_dropout_add_cuda
except (ModuleNotFoundError, ImportError) as e:
fused_dropout_add_cuda = None
#
# @torch.jit.script
# def jit_dropout_add(x, residual, prob, is_training):
# # type: (Tensor, Tensor, float, bool) -> Tensor
# out = F.dropout(x, p=prob, training=is_training)
# out = residual + out
# return out
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
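# Added note: this helper mirrors apex's autocast pattern. Inputs are pre-cast to
# the current autocast dtype (falling back to fp16 on older torch versions), and
# the caller then runs the custom Function with autocast disabled, so that
# custom_fwd/custom_bwd see consistent dtypes.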
class FusedDropoutAdd(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, residual, dropout_prob, is_training):
input = input.contiguous()
residual = residual.contiguous()
null_tensor = torch.tensor([])
dropout_prob_t = torch.tensor([dropout_prob])
ctx.fused = False
ctx.training = is_training
if not is_training or dropout_prob <= 0.0:
dropout_mask = null_tensor
input.add_(residual)
output = input
else:
if fused_dropout_add_cuda is not None and input.is_cuda and input.dtype == torch.float16:
# print("Fused dropout add")
ctx.fused = True
dropout_mask, output = fused_dropout_add_cuda.forward(is_training, input, residual, dropout_prob)
else:
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(input, p=(1. - dropout_prob))
else:
dropout_mask = null_tensor
dropout_results = input
dropout_results.add_(residual)
output = dropout_results
ctx.save_for_backward(dropout_mask, dropout_prob_t)
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grads):
dropout_mask, dropout_prob_t = ctx.saved_tensors
if dropout_prob_t[0] <= 0 or not ctx.training:
return output_grads, output_grads, None, None
# if fused_dropout_add_cuda is not None and output_grads.is_cuda and output_grads.dtype == torch.float16:
if ctx.fused:
grad_input = fused_dropout_add_cuda.backward(output_grads, dropout_mask, dropout_prob_t[0])
else:
grad_input = torch._masked_scale(output_grads, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
return grad_input, output_grads, None, None
# def fused_dropout_add(input, residual, dropout, is_training):
def fused_dropout_add(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return FusedDropoutAdd.apply(*args)
if __name__ == '__main__':
batch_size = 512
seq_len = 64
hidden_size = 1024
num_iters = 100
dropout = 0.0
class TestMLP(unittest.TestCase):
#
def test_creation(self):
test_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
test_add_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
output = fused_dropout_add(test_input, test_add_input, dropout, True)
def test_numeric(self):
test_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
test_add_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
output = fused_dropout_add(test_input, test_add_input, dropout, True)
ref_input = test_input.clone().detach().requires_grad_()
ref_add_input = test_add_input.clone().detach().requires_grad_()
ref_output = ref_input + ref_add_input
np.testing.assert_allclose(
ref_output.detach().cpu().numpy(),
output.detach().cpu().numpy(),
atol=1e-5, rtol=1e-4)
output.mean().mul(10.).backward()
ref_output.mean().mul(10.).backward()
np.testing.assert_allclose(
test_input.grad.detach().cpu().numpy(),
ref_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
np.testing.assert_allclose(
test_add_input.grad.detach().cpu().numpy(),
ref_add_input.grad.detach().cpu().numpy(),
atol=1e-7, rtol=1e-5)
def test_performance_half(self):
test_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
test_add_input = torch.empty(seq_len, batch_size, hidden_size,
device="cuda", dtype=torch.half).uniform_(-1., 1.).requires_grad_()
ref_input = test_input.clone().detach().requires_grad_()
ref_add_input = test_add_input.clone().detach().requires_grad_()
# Warm up GPU
for _ in range(100):
ref_out = ref_input + ref_add_input
ref_loss = ref_out.mean()
ref_loss.backward()
output = fused_dropout_add(test_input, test_add_input, dropout, False)
test_loss = output.mean()
test_loss.backward()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_out = ref_input + ref_add_input
ref_loss = ref_out.mean()
ref_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch DropoutAdd time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
output = fused_dropout_add(test_input, test_add_input, dropout, False)
test_loss = output.mean()
test_loss.backward()
torch.cuda.synchronize()
stop_time = time()
print(F"C++ DropoutAdd time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
unittest.main()

# File: NMTGMinor-master/onmt/modules/optimized/fused_clip_norm.py

# code is borrowed from NVIDIA Apex
# https://github.com/NVIDIA/apex/blob/master/apex/contrib/clip_grad/clip_grad.py
import torch
from math import inf  # torch._six was removed in recent torch versions
from typing import Union, Iterable
from onmt.utils import clip_grad_norm
try:
import fused_optim
except (ModuleNotFoundError, ImportError) as e:
fused_optim = None
from .fused_adam import multi_tensor_applier
_kernel_import_succeeded = multi_tensor_applier.available
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
def fused_clip_grad_norm(
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
error_if_nonfinite: bool = False) -> torch.Tensor:
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
This is identical to torch.nn.utils.clip_grad_norm_, except it
uses a fused CUDA kernel when computing the 2-norm of GPU tensors
in float32 and float16.
Args:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
error_if_nonfinite (bool): if True, an error is thrown if the total
norm of the gradients from :attr:`parameters` is ``nan``,
``inf``, or ``-inf``. Default: False (will switch to True in the future)
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
max_norm = float(max_norm)
norm_type = float(norm_type)
# Trivial case
if len(parameters) == 0:
return torch.tensor(0.)
# Fallback implementation
if not (_kernel_import_succeeded
and norm_type == 2.0
and any(p.is_cuda for p in parameters)):
return clip_grad_norm(
parameters,
max_norm,
norm_type=norm_type,
error_if_nonfinite=error_if_nonfinite,
)
# print("clipping grad with fused kernel ...", flush=True)
# Find fp32 and fp16 gradients on GPU
device = next(p.device for p in parameters if p.is_cuda)
grads_fp32, grads_fp16, grads_misc = [], [], []
for p in parameters:
grad = p.grad.detach()
if p.dtype == torch.float32 and p.device == device:
grads_fp32.append(grad)
elif p.dtype == torch.float16 and p.device == device:
grads_fp16.append(grad)
else:
grads_misc.append(grad)
# Compute gradient L2 norms
norms = []
dummy_overflow_buf = torch.zeros([1], dtype=torch.int32, device=device)
if grads_fp32:
norms.append(
multi_tensor_applier(
fused_optim.multi_tensor_l2norm,
dummy_overflow_buf,
[grads_fp32],
False,
)[0]
)
if grads_fp16:
norms.append(
multi_tensor_applier(
fused_optim.multi_tensor_l2norm,
dummy_overflow_buf,
[grads_fp16],
False,
)[0],
)
for g in grads_misc:
norms.append(torch.linalg.norm(g).unsqueeze(0).to(device))
total_norm = torch.linalg.norm(torch.cat(norms))
# Check for non-finite values
if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
raise RuntimeError(
f'The total norm of order {norm_type} for gradients from '
'`parameters` is non-finite, so it cannot be clipped. To disable '
'this error and scale the gradients by the non-finite norm anyway, '
'set `error_if_nonfinite=False`')
if max_norm > 0:
# Scale gradients
clip_coef = max_norm / (total_norm + 1e-6)
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
if grads_fp32:
multi_tensor_applier(
fused_optim.multi_tensor_scale,
dummy_overflow_buf,
[grads_fp32, grads_fp32],
clip_coef_clamped,
)
if grads_fp16:
multi_tensor_applier(
fused_optim.multi_tensor_scale,
dummy_overflow_buf,
[grads_fp16, grads_fp16],
clip_coef_clamped,
)
for g in grads_misc:
g.mul_(clip_coef_clamped.to(g.device))
return total_norm
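
# Hedged usage sketch (added; the model and shapes are assumptions):
#
#   model = torch.nn.Linear(16, 16).cuda()
#   model(torch.randn(4, 16, device="cuda")).sum().backward()
#   total_norm = fused_clip_grad_norm(model.parameters(), max_norm=1.0)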

# File: NMTGMinor-master/onmt/modules/optimized/self_attention.py

import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_attention_func import self_attn_func
from onmt.constants import double_precision
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
def apply_rotary_pos_emb(q, k, cos, sin):
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
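
# Added sanity-check sketch (an assumption, not original code): with cos = 1 and
# sin = 0 the rotary embedding must be the identity on both queries and keys:
#
#   q, k = torch.randn(4, 2, 8), torch.randn(4, 2, 8)
#   cos, sin = torch.ones(4, 1, 8), torch.zeros(4, 1, 8)
#   q2, k2 = apply_rotary_pos_emb(q, k, cos, sin)
#   assert torch.allclose(q, q2) and torch.allclose(k, k2)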
class SelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., rotary_pos_enc=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = True
self.scaling = self.head_dim ** -0.5
self.rotary_pos_enc = rotary_pos_enc
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
self.reset_parameters()
self.attn_func = self_attn_func
def reset_parameters(self):
# nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
# nn.init.xavier_uniform_(self.out_proj_weight)
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
def forward(self, inputs, pos, key_padding_mask=None, attn_mask=None,
incremental=False, incremental_cache=None, **kwargs):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
is_training = self.training
input_weights = self.in_proj_weight
input_bias = self.in_proj_bias
bsz, len_q = inputs.size(1), inputs.size(0)
heads = self.num_heads
head_dim = self.head_dim
scale_t = torch.tensor([head_dim ** -0.5])
# input_lin_results = F.linear(inputs, input_weights, input_bias)
# input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
# input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
#
# queries = input_lin_results[:, :, 0, :]
# keys = input_lin_results[:, :, 1, :]
# values = input_lin_results[:, :, 2, :]
#
# if incremental:
# keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
# values = values.contiguous().view(len_q, bsz, heads * head_dim)
# if 'k' in incremental_cache and 'v' in incremental_cache:
# keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
# incremental_cache['k'] = keys
# values = torch.cat([incremental_cache['v'], values], dim=0) # time first
# incremental_cache['v'] = values
# else:
# incremental_cache['k'] = keys
# incremental_cache['v'] = values
# keys = keys.view(-1, bsz * heads, head_dim)
# values = values.view(-1, bsz * heads, head_dim)
#
# len_k = keys.size(0)
#
# # apply rotary position encodings
# if self.rotary_pos_enc:
# cos, sin = pos
# queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)
#
# matmul1_results = torch.bmm(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul(scale_t[0])
#
if key_padding_mask is not None:
            assert (attn_mask is None), "ERROR: attn_mask and key_padding_mask should not both be defined!"
mask = key_padding_mask
if len(mask.shape) == 3:
mask = mask.squeeze(1)
#
# batches, seql_q, seql_k = matmul1_results.size()
# seqs = int(batches / heads)
# matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
# mask = mask.to(torch.bool)
# matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
# matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
#
        elif attn_mask is not None:
            mask = attn_mask
            if len(mask.shape) == 3:
                mask = mask.squeeze(0)
            mask = mask.to(torch.bool)
            # matmul1_results.masked_fill_(mask, float('-inf'))
        else:
            # neither mask is given: pass None through to the attention function
            mask = None
#
# softmax_results = F.softmax(matmul1_results, dim=-1)
# dropout_results = F.dropout(softmax_results, self.dropout, training=self.training)
#
# matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1))
#
# matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1),
# inputs.size(2))
#
# outputs = F.linear(matmul2_results, self.out_proj_weight, self.out_proj_bias)
#
# coverage = dropout_results
outputs, coverage = self.attn_func(attn_mask is not None, is_training, self.num_heads, inputs,
input_weights, self.out_proj_weight,
input_bias, self.out_proj_bias,
mask, self.dropout,
self.rotary_pos_enc, pos,
incremental, incremental_cache,
False, True)
return outputs, coverage
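
# Hedged usage sketch (added; shapes are assumptions): with rotary_pos_enc=False
# the `pos` argument is unused and may be None:
#
#   attn = SelfMultiheadAttn(embed_dim=512, num_heads=8, rotary_pos_enc=False)
#   x = torch.randn(10, 4, 512)  # [len, bsz, embed_dim]
#   out, coverage = attn(x, pos=None)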

# File: NMTGMinor-master/onmt/modules/optimized/encdec_attention_func_bias.py

"""
Encoder-Decoder multi-head attention.
Code is heavily adapted from apex
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import encdec_multihead_attn_bias_cuda
except (ModuleNotFoundError, ImportError) as e:
encdec_multihead_attn_bias_cuda = None
try:
import encdec_multihead_attn_bias_blaslt
except (ModuleNotFoundError, ImportError) as e:
encdec_multihead_attn_bias_blaslt = None
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
# only 1 term this time
def apply_rotary_pos_emb(q, cos, sin):
return (q * cos) + (rotate_half(q) * sin)
def rotate_backward(dx):
dx2, dx1 = dx[..., :dx.shape[-1] // 2], dx[..., dx.shape[-1] // 2:]
return torch.cat((dx1, -dx2), dim=dx1.ndim - 1)
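
# Added note with a gradient-check sketch (an assumption, not original code):
# rotate_half is the linear map [x1, x2] -> [-x2, x1], so its adjoint maps
# incoming grads [d1, d2] -> [d2, -d1], which is exactly rotate_backward:
#
#   x = torch.randn(8, requires_grad=True)
#   rotate_half(x).sum().backward()
#   assert torch.allclose(x.grad, rotate_backward(torch.ones(8)))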
class EncdecAttnBiasFunc(torch.autograd.Function):
@staticmethod
@custom_fwd()
def forward(ctx, recompute, is_training, heads, inputs_q, inputs_kv,
input_weights_q, input_weights_kv, output_weights,
input_bias_q, input_bias_kv, output_bias,
mask, dropout_prob,
incremental, incremental_cache,
rotary_pos_enc, pos_emb_q, pos_emb_k,
low_precision, return_coverage):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([]).to(inputs_q.device)
head_dim = inputs_q.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
use_mask = (mask is not None)
bsz, len_q, len_k = inputs_q.size(1), inputs_q.size(0), inputs_kv.size(0)
ctx.incremental = incremental
ctx.fused_softmax_dropout = False
ctx.fused_all = False
ctx.len_q = len_q
ctx.len_k = len_k
ctx.low_precision = low_precision
ctx.return_coverage = return_coverage
ctx.recompute = recompute
ctx.rotary_pos_enc = rotary_pos_enc
if encdec_multihead_attn_bias_cuda is not None and not incremental and len_k <= 2048 \
and inputs_q.type() == 'torch.cuda.HalfTensor' and not rotary_pos_enc and low_precision:
mask_ = mask
# print("[DEBUGGING] FAST CUDA ENCDEC ATTENTION BIAS")
# mask = mask.half() * -10000
mask = mask.unsqueeze(1).unsqueeze(2).bool()
cuda_module = encdec_multihead_attn_bias_blaslt if encdec_multihead_attn_bias_blaslt is not None \
else encdec_multihead_attn_bias_cuda
if encdec_multihead_attn_bias_blaslt is None:
ctx.recompute = False
input_lin_q_results, input_lin_kv_results, \
attn_scores, dropout_results, dropout_mask, \
matmul2_results, outputs \
= cuda_module.forward(is_training, heads, inputs_q, inputs_kv,
input_weights_q, input_weights_kv,
output_weights, input_bias_q, input_bias_kv, output_bias,
mask, dropout_prob)
            sinq, cosq = null_tensor, null_tensor
            sink, cosk = null_tensor, null_tensor
if ctx.recompute:
del matmul2_results, dropout_results, attn_scores, input_lin_q_results, input_lin_kv_results,
ctx.save_for_backward(heads_t,
scale_t,
null_tensor, null_tensor, null_tensor, null_tensor, null_tensor,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
input_bias_q,
input_bias_kv,
output_weights,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
dropout_results = null_tensor
else:
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
attn_scores,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
input_bias_q,
input_bias_kv,
output_weights,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
ctx.fused_all = True
if return_coverage:
return outputs, dropout_results
else:
return (outputs,)
if mask is not None:
# Self Attention Pad Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
# Input Linear GEMM Q
# input1: (activations) [seql_q, bsz, embed_dim] -> [len_q * bsz, embed_dim]
# input2: (weights) [embed_dim, embed_dim]. transpose(0, 1)
# output: [len_q * bsz, embed_dim] -> [seql_q, bsz, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_q_results = torch.addmm(input_bias_q,
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
input_weights_q.transpose(0, 1),
beta=1., alpha=1.)
input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads, head_dim)
# Input Linear GEMM KV
# input1: (activations) [seql_k, bsz, embed_dim(1024)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)] (transpose [0,1])
# output: [seql_k, bsz, embed_dim*2]
# GEMM: ( (seql_k*seqs) x embed_dim ) x ( embed_dim x embed_dim*2 ) = (seql_k*seqs x embed_dim*2)
        # Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
input_lin_kv_results = torch.stack([keys, values], dim=-2)
else:
input_lin_kv_results = torch.addmm(input_bias_kv,
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1),
beta=1., alpha=1.)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1),
input_weights_kv.size(0))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
# TODO: rotary pos encoding
if rotary_pos_enc:
assert pos_emb_q is not None and pos_emb_k is not None
cosq, sinq = pos_emb_q
queries = apply_rotary_pos_emb(queries, cosq, sinq)
cosk, sink = pos_emb_k
keys_ = apply_rotary_pos_emb(keys, cosk, sink)
keys.copy_(keys_)
else:
sinq, cosq = null_tensor, null_tensor
sink, cosk = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
if matmul1_results.type() == 'torch.cuda.HalfTensor':
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
else:
softmax_results = F.softmax(matmul1_results, dim=-1)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
        # The output tensor specification is needed here for the non-standard output layout.
        # Since pytorch cannot currently perform autograd through an op with a user-specified
        # output tensor, this requires a manually written backward pass.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results.transpose(1, 0))
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1)).transpose(0, 1)
# view from [len_q, bsz*heads, head_dim] to [len_q, bsz, embed]
matmul2_results = matmul2_results.contiguous().view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
outputs = torch.addmm(output_bias,
matmul2_results.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(inputs_q.size(0), inputs_q.size(1), output_weights.size(0))
if not ctx.recompute:
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
input_bias_q,
input_bias_kv,
output_weights,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
else:
ctx.save_for_backward(heads_t,
scale_t,
null_tensor, null_tensor, null_tensor, null_tensor, null_tensor,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
input_bias_q,
input_bias_kv,
output_weights,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
del input_lin_q_results, queries
del input_lin_kv_results, keys, values
del matmul1_results, matmul2_results
del softmax_results, dropout_results
dropout_results = null_tensor
if return_coverage:
return (outputs, dropout_results)
else:
return (outputs,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
incremental = ctx.incremental
len_q = ctx.len_q
len_key = ctx.len_k
if ctx.return_coverage:
output_grads, coverage_grads = output_grads
else:
output_grads = output_grads[0]
output_grads = output_grads.contiguous()
if ctx.fused_all:
assert encdec_multihead_attn_bias_cuda is not None and len_key <= 2048
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
attn_scores,\
input_lin_q_results, \
input_lin_kv_results, \
inputs_q, \
inputs_kv, \
input_weights_q, \
input_weights_kv, \
input_bias_q, \
input_bias_kv, \
output_weights, \
dropout_mask, mask,\
dropout_prob_t, \
sinq, cosq, sink, cosk = ctx.saved_tensors
if input_weights_q.requires_grad:
cuda_module = encdec_multihead_attn_bias_blaslt if encdec_multihead_attn_bias_blaslt is not None \
else encdec_multihead_attn_bias_cuda
if ctx.recompute:
input_q_grads, \
input_kv_grads, \
input_weight_q_grads, \
input_weight_kv_grads, \
output_weight_grads, \
input_bias_q_grads, input_bias_kv_grads, output_bias_grads \
= cuda_module.backward_recompute(heads_t[0], output_grads,
inputs_q, inputs_kv,
input_weights_q,
input_weights_kv,
input_bias_q,
input_bias_kv,
output_weights, dropout_mask, mask,
dropout_prob_t[0])
else:
input_q_grads, \
input_kv_grads, \
input_weight_q_grads, \
input_weight_kv_grads, \
output_weight_grads, \
input_bias_q_grads, input_bias_kv_grads, output_bias_grads \
= cuda_module.backward(heads_t[0], output_grads, matmul2_results,
dropout_results,
attn_scores,
input_lin_q_results,
input_lin_kv_results,
inputs_q, inputs_kv, input_weights_q,
input_weights_kv,
output_weights, dropout_mask,
dropout_prob_t[0])
else:
input_q_grads, \
input_kv_grads, \
= encdec_multihead_attn_bias_cuda.backward_input_only(heads_t[0], output_grads, matmul2_results,
dropout_results,
attn_scores,
input_lin_q_results,
input_lin_kv_results,
inputs_q, inputs_kv, input_weights_q,
input_weights_kv,
output_weights, dropout_mask,
dropout_prob_t[0])
input_weight_q_grads, \
input_weight_kv_grads, \
output_weight_grads, \
input_bias_q_grads, input_bias_kv_grads, output_bias_grads \
= None, None, None, None, None, None
del ctx.recompute
del ctx.fused_all
return None, None, None \
, input_q_grads, input_kv_grads \
, input_weight_q_grads, input_weight_kv_grads, output_weight_grads \
, input_bias_q_grads, input_bias_kv_grads, output_bias_grads \
, None, None, None, None, None, None, None, None, None
heads_t, scale_t, matmul2_results, dropout_results, softmax_results, \
input_lin_q_results, input_lin_kv_results, \
inputs_q, inputs_kv, \
input_weights_q, input_weights_kv, input_bias_q, input_bias_kv, output_weights, \
dropout_mask, pad_mask, dropout_prob_t, \
sinq, cosq, sink, cosk, \
= ctx.saved_tensors
head_dim = inputs_q.size(2) // heads_t.item()
bsz = inputs_q.size(1)
if ctx.recompute:
            assert not ctx.incremental
heads = heads_t[0]
# Recomputing the tensors in the forward pass here
input_lin_q_results = torch.addmm(input_bias_q,
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)),
input_weights_q.transpose(0, 1),
beta=1., alpha=1.)
input_lin_q_results = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1), input_weights_q.size(0))
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads, head_dim)
input_lin_kv_results = torch.addmm(input_bias_kv,
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)),
input_weights_kv.transpose(0, 1),
beta=1., alpha=1.)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1),
input_weights_kv.size(0))
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results.baddbmm_(queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
beta=0.0, alpha=scale_t[0])
if pad_mask is not None:
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(pad_mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
if matmul1_results.type() == 'torch.cuda.HalfTensor':
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
else:
softmax_results = F.softmax(matmul1_results, dim=-1)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
if dropout_prob_t[0] > 0:
pinv = 1.0 / (1.0 - dropout_prob_t[0])
dropout_results = softmax_results * dropout_mask * pinv
else:
dropout_results = softmax_results
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results.transpose(1, 0))
matmul2_results = matmul2_results.contiguous().view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Batch sizes and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, bsz, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=bsz*heads, 2, head_dim]
queries = input_lin_q_results.view(inputs_q.size(0), inputs_q.size(1) * heads_t[0], head_dim)
input_lin_kv_results = input_lin_kv_results.view(inputs_kv.size(0), inputs_kv.size(1) * heads_t[0], 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
# Slice out k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_kv_results_grads = torch.empty_like(input_lin_kv_results)
queries_grads = torch.empty_like(queries)
keys_grads = input_lin_kv_results_grads[:, :, 0, :]
values_grads = input_lin_kv_results_grads[:, :, 1, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, bsz, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
if input_weights_q.requires_grad:
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
else:
output_weight_grads, output_bias_grads = None, None
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1) * heads_t[0],
head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
        dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# TODO:
if ctx.rotary_pos_enc:
queries_grads = queries_grads * cosq + rotate_backward(sinq * queries_grads)
keys_grads_ = keys_grads * cosk + rotate_backward(sink * keys_grads)
keys_grads.copy_(keys_grads_)
# Input Q Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim (1024), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
queries_grads = queries_grads.view(inputs_q.size(0) * inputs_q.size(1), heads_t[0] * head_dim)
input_q_grads = torch.mm(queries_grads, input_weights_q)
input_q_grads = input_q_grads.view(inputs_q.size(0), inputs_q.size(1), inputs_q.size(2))
# Input KV Linear GEMM - DGRAD
# input1: (data grads) [seql_k, seqs, 2*embed_dim(2048)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)]
# output: [seql_k, seqs, embed_dim]
# GEMM: ( (seql_k*seqs) x 2*embed_dim ) x ( 2*embed_dim x embed_dim ) = (seql_k*seqs x embed_dim)
        # the key and value grads are already stored in the (shared) keys_grads and values_grads views
input_lin_kv_results_grads = input_lin_kv_results_grads.view(inputs_kv.size(0) * inputs_kv.size(1),
heads_t[0] * 2 * head_dim)
input_kv_grads = torch.mm(input_lin_kv_results_grads, input_weights_kv)
input_kv_grads = input_kv_grads.view(inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2))
# Input Q Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, embed_dim(1024)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [embed_dim, embed_dim]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (embed_dim x embed_dim)
if input_weights_q.requires_grad:
input_weight_q_grads = torch.mm(queries_grads.transpose(0, 1),
inputs_q.view(inputs_q.size(0) * inputs_q.size(1), inputs_q.size(2)))
input_bias_q_grads = torch.sum(queries_grads, 0)
else:
input_weight_q_grads, input_bias_q_grads = None, None
# Input KV Linear GEMM - WGRAD
# input1: (data grads) [seql_k*seqs, 2*embed_dim(2048)]
# input2: (activations) [seql_k*seqs, embed_dim(1024)]
# output: [2*embed_dim, embed_dim]
# GEMM: ( 2*embed_dim x seql_k*seqs ) x ( seql_k*seqs x embed_dim ) = (2*embed_dim x embed_dim)
if input_weights_q.requires_grad:
input_weight_kv_grads = torch.mm(input_lin_kv_results_grads.transpose(0, 1),
inputs_kv.view(inputs_kv.size(0) * inputs_kv.size(1), inputs_kv.size(2)))
input_bias_kv_grads = torch.sum(input_lin_kv_results_grads, 0)
else:
input_weight_kv_grads, input_bias_kv_grads = None, None
return None, None, None \
, input_q_grads, input_kv_grads \
, input_weight_q_grads, input_weight_kv_grads, output_weight_grads \
, input_bias_q_grads, input_bias_kv_grads, output_bias_grads \
, None, None, None, None, None, None, None, None, None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def encdec_attn_bias_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return EncdecAttnBiasFunc.apply(*args)
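
# Added reference template for the positional argument order, mirroring
# EncdecAttnBiasFunc.forward above (not original code):
#
#   encdec_attn_bias_func(
#       recompute, is_training, heads, inputs_q, inputs_kv,
#       input_weights_q, input_weights_kv, output_weights,
#       input_bias_q, input_bias_kv, output_bias,
#       mask, dropout_prob, incremental, incremental_cache,
#       rotary_pos_enc, pos_emb_q, pos_emb_k,
#       low_precision, return_coverage)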
class EncdecAttnBiasCompactFunc(torch.autograd.Function):
@staticmethod
@custom_fwd()
def forward(ctx, recompute, is_training, heads,
input_lin_q_results, input_lin_kv_results,
mask, dropout_prob,
incremental, incremental_cache,
rotary_pos_enc, pos_emb_q, pos_emb_k,
low_precision, return_coverage):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([]).to(input_lin_q_results.device)
head_dim = input_lin_q_results.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
use_mask = (mask is not None)
bsz, len_q, len_k = input_lin_q_results.size(1), input_lin_q_results.size(0), input_lin_kv_results.size(0)
ctx.incremental = incremental
ctx.fused_softmax_dropout = False
ctx.fused_all = False
ctx.len_q = len_q
ctx.len_k = len_k
ctx.low_precision = low_precision
ctx.return_coverage = return_coverage
ctx.recompute = recompute
ctx.rotary_pos_enc = rotary_pos_enc
if encdec_multihead_attn_bias_cuda is not None and not incremental and len_k <= 2048 \
and input_lin_q_results.type() == 'torch.cuda.HalfTensor' and not rotary_pos_enc and low_precision:
mask_ = mask
mask = mask.unsqueeze(1).unsqueeze(2).bool()
cuda_module = encdec_multihead_attn_bias_blaslt
ctx.recompute = False
attn_scores, dropout_results, dropout_mask, \
matmul2_results \
= cuda_module.forward_compact(is_training, heads, input_lin_q_results, input_lin_kv_results,
mask, dropout_prob)
            sinq, cosq = null_tensor, null_tensor
            sink, cosk = null_tensor, null_tensor
ctx.save_for_backward(heads_t,
scale_t,
dropout_results,
attn_scores,
input_lin_q_results,
input_lin_kv_results,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
ctx.fused_all = True
if return_coverage:
return matmul2_results, dropout_results
else:
return (matmul2_results,)
if mask is not None:
# Self Attention Pad Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
queries = input_lin_q_results.view(input_lin_q_results.size(0), input_lin_q_results.size(1) * heads, head_dim)
# Input Linear GEMM KV
# input1: (activations) [seql_k, bsz, embed_dim(1024)]
# input2: (weights) [embed_dim*2 (2048), embed_dim (1024)] (transpose [0,1])
# output: [seql_k, bsz, embed_dim*2]
# GEMM: ( (seql_k*seqs) x embed_dim ) x ( embed_dim x embed_dim*2 ) = (seql_k*seqs x embed_dim*2)
        # Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
input_lin_kv_results = torch.stack([keys, values], dim=-2)
else:
input_lin_kv_results = input_lin_kv_results.view(input_lin_kv_results.size(0), input_lin_kv_results.size(1) * heads,
2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
# TODO: rotary pos encoding
if rotary_pos_enc:
assert pos_emb_q is not None and pos_emb_k is not None
cosq, sinq = pos_emb_q
queries = apply_rotary_pos_emb(queries, cosq, sinq)
cosk, sink = pos_emb_k
keys_ = apply_rotary_pos_emb(keys, cosk, sink)
keys.copy_(keys_)
else:
sinq, cosq = null_tensor, null_tensor
sink, cosk = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
# Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
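        # i.e. per (batch*head) slice: scores = (head_dim ** -0.5) * Q @ K^T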
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
batches, seql_q, seql_k = matmul1_results.size()
bsz = int(batches / heads)
matmul1_results = matmul1_results.view(bsz, heads, seql_q, seql_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(0), dropout_results.size(1), values.size(2)),
dtype=dropout_results.dtype, device=dropout_results.device)
torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
matmul2_results = matmul2_results.transpose(0, 1).contiguous()
# return [len_q, bsz*heads, head_dim]
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_q_results,
input_lin_kv_results,
dropout_mask, mask,
dropout_prob_t,
sinq, cosq, sink, cosk)
if return_coverage:
return (matmul2_results, dropout_results)
else:
return (matmul2_results,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
incremental = ctx.incremental
len_q = ctx.len_q
len_key = ctx.len_k
if ctx.return_coverage:
output_lin_grads, coverage_grads = output_grads
else:
output_lin_grads = output_grads[0]
output_lin_grads = output_lin_grads.contiguous()
if ctx.fused_all:
            assert encdec_multihead_attn_bias_blaslt is not None and len_key <= 2048
heads_t, \
scale_t, \
dropout_results, \
attn_scores,\
input_lin_q_results, \
input_lin_kv_results, \
dropout_mask, mask,\
dropout_prob_t, \
sinq, cosq, sink, cosk = ctx.saved_tensors
cuda_module = encdec_multihead_attn_bias_blaslt
input_lin_q_results_grads, input_lin_kv_results_grads \
= cuda_module.backward_compact(heads_t[0], output_lin_grads,
dropout_results,
attn_scores,
input_lin_q_results,
input_lin_kv_results,
dropout_mask,
dropout_prob_t[0])
return None, None, None \
, input_lin_q_results_grads, input_lin_kv_results_grads \
, None, None, \
None, None, \
None, None, None,\
None, None
heads_t, scale_t, dropout_results, softmax_results, \
input_lin_q_results, input_lin_kv_results, \
dropout_mask, pad_mask, dropout_prob_t, \
sinq, cosq, sink, cosk, \
= ctx.saved_tensors
embed_dim = input_lin_q_results.size(2)
        head_dim = embed_dim // heads_t.item()
bsz = input_lin_q_results.size(1)
# Slice out k,v from one big Input Linear output (should only impact meta data, no copies!)
# Batch sizes and heads are combined to make the batch of the Batched GEMM
# input_lin_kv_results: [seql_k, bsz, heads(16), 2, head_dim(64)]
# input_lin_kv_results: [seql_k, batches=bsz*heads, 2, head_dim]
        queries = input_lin_q_results.view(input_lin_q_results.size(0),
                                           input_lin_q_results.size(1) * heads_t[0], head_dim)
        input_lin_kv_results = input_lin_kv_results.view(input_lin_kv_results.size(0), -1, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
# Slice out k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_kv_results_grads = torch.empty_like(input_lin_kv_results)
queries_grads = torch.empty_like(queries)
keys_grads = input_lin_kv_results_grads[:, :, 0, :]
values_grads = input_lin_kv_results_grads[:, :, 1, :]
# [seql_q, seqs*heads, head_dim] -> [seqs*heads, seql_q, head_dim]
output_lin_grads = output_lin_grads.transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError: # backward compatibility
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
        # rotary position encoding backward
        if ctx.rotary_pos_enc:
            queries_grads_ = queries_grads * cosq + rotate_backward(sinq * queries_grads)
            queries_grads.copy_(queries_grads_)
            keys_grads_ = keys_grads * cosk + rotate_backward(sink * keys_grads)
            keys_grads.copy_(keys_grads_)
        # reshape the query/key-value gradients back to the layout of the forward inputs
        input_lin_q_results_grads = queries_grads.view(len_q, bsz, -1)
        input_lin_kv_results_grads = input_lin_kv_results_grads.view(len_key, bsz, -1)
return None, None, None \
, input_lin_q_results_grads, input_lin_kv_results_grads \
, None, None, \
None, None, \
None, None, None, \
None, None
def encdec_attn_bias_compact_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return EncdecAttnBiasCompactFunc.apply(*args)
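# Illustrative call of the compact function (a sketch; all sizes below are
# made up and the tensors are random, this only shows the expected layouts):
#
#   len_q, len_k, bsz, heads, head_dim = 5, 7, 2, 4, 8
#   q_lin = torch.randn(len_q, bsz, heads * head_dim, device='cuda')       # pre-projected queries
#   kv_lin = torch.randn(len_k, bsz, 2 * heads * head_dim, device='cuda')  # pre-projected keys/values
#   mask = torch.zeros(bsz, len_k, device='cuda')                          # nonzero = padded position
#   out, = encdec_attn_bias_compact_func(False, True, heads, q_lin, kv_lin,
#                                        mask, 0.1,          # mask, dropout
#                                        False, {},          # no incremental decoding
#                                        False, None, None,  # no rotary encodings
#                                        False, False)       # fp32 path, no coverage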
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='gradcheck for the encoder-decoder attention-bias function')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
    parser.add_argument('-gpu', default=0, type=int,
                        help="GPU device to run the test on.")
test_function = encdec_attn_bias_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super().__init__()
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
# self.function = RelativeShiftFunction.apply
self.in_proj_weight_q = torch.Tensor(model_size, model_size)
self.in_proj_weight_kv = torch.Tensor(2 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias_q = torch.Tensor(model_size)
self.in_proj_bias_kv = torch.Tensor(2 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
torch.nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias_q, 0.)
torch.nn.init.constant_(self.in_proj_bias_kv, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, in_proj_weight_q, in_proj_bias_q, input, context, in_proj_weight_kv, in_proj_bias_kv,
out_proj_weight, out_proj_bias, mask,
recompute=False, use_rotary_enc=False, pos_emb_q=None, pos_emb_k=None):
is_training = True
dropout = 0.0
low_precision = False
return_coverage = False
            # expected argument order of encdec_attn_bias_func:
            # (recompute, is_training, heads, input, context,
            #  in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
            #  in_proj_bias_q, in_proj_bias_kv, out_proj_bias,
            #  mask, dropout, incremental, incremental_cache,
            #  rotary_pos_enc, pos_emb_q, pos_emb_k,
            #  low_precision, return_coverage)
return self.function(recompute, is_training, self.heads, input, context,
in_proj_weight_q, in_proj_weight_kv, out_proj_weight,
in_proj_bias_q, in_proj_bias_kv, out_proj_bias,
mask, dropout,
False, None, # For the incremental stuff
use_rotary_enc, pos_emb_q, pos_emb_k,
                                 low_precision, return_coverage)  # low precision off: gradcheck runs in double
bsz = 4
len_q = 5
len_r = 15
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
input_states.requires_grad = True
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight_q = parameters.in_proj_weight_q.double().cuda()
in_proj_weight_kv = parameters.in_proj_weight_kv.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
in_proj_bias_q = parameters.in_proj_bias_q.double().cuda()
in_proj_bias_kv = parameters.in_proj_bias_kv.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
in_proj_weight_q.requires_grad = True
out_proj_weight.requires_grad = True
in_proj_weight_kv.requires_grad = True
in_proj_bias_q.requires_grad = True
in_proj_bias_kv.requires_grad = True
out_proj_bias.requires_grad = True
mask = input_states.new(*(bsz, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking start.")
#
context = torch.randn(*(len_r, bsz, opt.model_size)).double().cuda()
context.requires_grad = True
#
recompute = False
try:
torch.autograd.gradcheck(net, (in_proj_weight_q, in_proj_bias_q, input_states, context, in_proj_weight_kv,
in_proj_bias_kv,
out_proj_weight, out_proj_bias,
mask, recompute), atol=1e-04, rtol=0.001)
except RuntimeError as e:
        print("gradcheck failed:", e)
print("gradchecking completed.")
| 53,892
| 47.464928
| 128
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/feed_forward.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from onmt.modules.dropout import variational_dropout, ReLUDropout
from onmt.modules.swish import SiLU
import onmt
from torch.cuda.amp import autocast
class AGELU(torch.nn.Module):
def forward(self, input):
return agelu(input)
def agelu(x):
SQRT_M2_PI = math.sqrt(2 / math.pi)
COEFF = 0.044715
return 0.5 * x * (1.0 + torch.tanh(SQRT_M2_PI * (x + COEFF * torch.pow(x, 3))))
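# Sanity note (a sketch, not part of the module API): the tanh formula above is
# the standard approximation of GELU and should track F.gelu closely, e.g.
#
#   x = torch.linspace(-4.0, 4.0, steps=9)
#   assert torch.allclose(agelu(x), F.gelu(x), atol=1e-3)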
class PositionWiseFeedForward(nn.Module):
"""Two-layer Feed-forward neural network"""
def __init__(self, model_size, inner_size, dropout=0., variational=False,
activation='relu', glu=False, weight_drop=0.0,
dropout_residual=False, res_dropout=0.0):
super().__init__()
self.model_size = model_size
self.inner_size = inner_size
self.dropout = dropout
self.bias = True
self.variational = variational
self.activation = activation
self.glu = glu
self.weight_drop = weight_drop
self.autograd = False
self.fused_dropout_add = False
self.dropout_residual = dropout_residual
self.res_dropout = res_dropout
if self.activation == 'relu':
if self.glu:
self.act = nn.ReLU()
else:
self.act = ReLUDropout(p=self.dropout, variational=self.variational, batch_first=False)
elif self.activation == 'gelu':
self.act = nn.GELU()
elif self.activation == 'agelu':
self.act = AGELU()
elif self.activation in ['silu', 'swish']:
self.act = SiLU()
elif self.activation in ['sigmoid']:
if self.glu:
self.act = nn.functional.glu
else:
print("Sigmoid activation function is recommended to be used with -glu")
raise NotImplementedError
self.in_proj_weight = Parameter(torch.Tensor(inner_size * (2 if glu else 1), model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, inner_size))
self.in_proj_bias = Parameter(torch.Tensor(inner_size * (2 if glu else 1)))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.reset_parameters()
self.fused = False
# At the moment fused mlp is supported for RELU, SiLU, Swish, GELU and AGELU (approximated GELU)
if not self.glu and \
self.activation in ['relu', 'silu', 'swish', 'gelu', 'agelu'] and not self.variational:
if self.activation == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation in ['silu', 'swish']:
from onmt.modules.mlp.mlp import mlp_silu_function
if mlp_silu_function is not None:
self.fused_function = mlp_silu_function
self.fused = True
elif self.activation == 'gelu':
if self.dropout_residual:
from onmt.modules.mlp.mlp import mlp_gelu_dropout_add_function
if mlp_gelu_dropout_add_function is not None:
self.fused_function = mlp_gelu_dropout_add_function
self.fused = True
self.fused_dropout_add = True
if not self.fused:
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
elif self.activation == 'agelu':
from onmt.modules.mlp.mlp import mlp_agelu_function
if mlp_agelu_function is not None:
self.fused_function = mlp_agelu_function
self.fused = True
def reset_parameters(self, init='normal'):
if init == 'normal':
std_ = math.sqrt(2.0 / (self.model_size + self.inner_size))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else:
std_ = math.sqrt(6.0 / (self.model_size + self.inner_size))
nn.init.uniform_(self.in_proj_weight, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj_bias, 0.0)
def convert_autograd(self):
if self.autograd:
return
with torch.no_grad():
self.autograd = True
self.linear_in = torch.nn.Linear(self.model_size, self.inner_size)
self.linear_out = torch.nn.Linear(self.inner_size, self.model_size)
self.linear_in.weight.copy_(self.in_proj_weight)
self.linear_in.bias.copy_(self.in_proj_bias)
self.linear_out.weight.copy_(self.out_proj_weight)
self.linear_out.bias.copy_(self.out_proj_bias)
del self.in_proj_weight
del self.in_proj_bias
del self.out_proj_weight
del self.out_proj_bias
def forward(self, input, *args, **kwargs):
if self.fused and input.is_cuda and not self.autograd:
            # if autocast is enabled: manually cast the function args to half precision,
            # since custom_fwd(...) does not handle the cast here for some reason
weights = [self.in_proj_weight, self.out_proj_weight]
biases = [self.in_proj_bias, self.out_proj_bias]
seq_len, bsz, hidden_size = input.size(0), input.size(1), input.size(2)
dropout = self.dropout if self.training else 0.0
if self.fused_dropout_add:
res_dropout = self.res_dropout if self.training else 0.0
hidden = self.fused_function(dropout, res_dropout, input.view(seq_len * bsz, -1),
*weights, *biases)
else:
recompute = onmt.constants.recompute
hidden = self.fused_function(dropout, recompute, input.view(seq_len * bsz, -1),
*weights, *biases)
hidden = hidden.view(seq_len, bsz, hidden_size)
# verification code (only with dropout = 0.0)
# with torch.no_grad():
# hidden_ = F.linear(self.act(F.linear(input, self.in_proj_weight, self.in_proj_bias)),
# self.out_proj_weight, self.out_proj_bias).type_as(hidden)
#
# if self.fused_dropout_add:
# hidden_.add_(input)
#
# comp = torch.allclose(hidden, hidden_, rtol=1e-02, atol=1e-03)
# if not comp:
# print("Warning! The fused function doesn't match the PyTorch function.")
# print(hidden - hidden_)
else:
if self.autograd:
hidden = self.linear_in(input)
else:
hidden = F.linear(input, self.in_proj_weight, self.in_proj_bias)
if self.glu and self.activation != 'sigmoid':
hidden, gate = hidden.chunk(2, dim=-1)
hidden = self.act(hidden) * gate
            else:  # plain activation, or F.glu when activation == 'sigmoid'
hidden = self.act(hidden)
if not (not self.glu and self.activation == 'relu'):
if self.variational:
hidden = variational_dropout(hidden, p=self.dropout, training=self.training,
inplace=self.activation in ['silu', 'relu', 'swish', 'gelu'])
else:
hidden = F.dropout(hidden, p=self.dropout, training=self.training,
inplace=self.activation in ['silu', 'relu', 'swish', 'gelu'])
if self.autograd:
hidden = self.linear_out(hidden)
else:
hidden = F.linear(hidden, self.out_proj_weight, self.out_proj_bias)
if self.dropout_residual:
if not self.fused_dropout_add:
if not self.variational:
hidden = F.dropout(hidden, p=self.res_dropout, training=self.training) + input
else:
                    hidden = variational_dropout(hidden, p=self.res_dropout, training=self.training) + input
return hidden
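# Example usage (a sketch with made-up sizes; not part of the original file):
#
#   ffn = PositionWiseFeedForward(model_size=512, inner_size=2048,
#                                 dropout=0.1, activation='gelu')
#   x = torch.randn(20, 8, 512)   # [seq_len, bsz, model_size]
#   y = ffn(x)                    # -> [20, 8, 512]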
| 8,671
| 40.692308
| 110
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/self_attention_attnbias_func.py
|
"""
Multi-head self-attention with an additive attention bias.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import self_multihead_attn_cuda
except (ModuleNotFoundError, ImportError) as e:
self_multihead_attn_cuda = None
try:
import self_multihead_attn_bias_blaslt
except (ModuleNotFoundError, ImportError) as e:
self_multihead_attn_bias_blaslt = None
def rotate_half(x):
# this function works the same with 3D or 2D tensors
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
def apply_rotary_pos_emb(q, k, cos, sin):
# q: seq_len x (bszxhead) x headsize
# k: seq_len x (bszxhead) x headsize
# cos: seq_len x 1 x head_size
# sin: seq_len x 1 x head_Size
# or
# q: (total_bsz) x head x head_size
# k: (total_bsz) x head x head_size
# sin: (total_bsz) x 1 x head_size
# cos: (total_bsz) x 1 x head_size
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
def rotate_backward(dx):
dx2, dx1 = dx[..., :dx.shape[-1] // 2], dx[..., dx.shape[-1] // 2:]
return torch.cat((dx1, -dx2), dim=dx1.ndim - 1)
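# Quick shape check for the rotary helpers (a sketch; the shapes are hypothetical):
#
#   q = torch.randn(5, 8, 16)      # seq_len x (bsz*heads) x head_dim
#   k = torch.randn(5, 8, 16)
#   cos = torch.randn(5, 1, 16)    # broadcast over the batch*head dimension
#   sin = torch.randn(5, 1, 16)
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
#   assert q_rot.shape == q.shape and k_rot.shape == k.shape
#
# rotate_backward is the adjoint of rotate_half and is used to push gradients
# through the rotation in the backward pass.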
class SelfAttnBiasFunc(torch.autograd.Function):
@staticmethod
@custom_fwd()
def forward(ctx, use_time_mask, is_training, heads, inputs, attn_bias,
input_weights, output_weights,
input_biases, output_biases,
mask, dropout_prob,
rotary_pos_enc, pos_emb,
incremental, incremental_cache,
low_precision, return_coverage, recompute):
inputs = inputs.contiguous()
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
ctx.rotary_pos_enc = rotary_pos_enc
ctx.return_coverage = return_coverage
ctx.low_precision = low_precision
ctx.use_time_mask = use_time_mask
ctx.recompute = recompute
input_weights = input_weights.contiguous()
output_weights = output_weights.contiguous()
bsz, len_q = inputs.size(1), inputs.size(0)
# print(low_precision, incremental, inputs.type())
if low_precision and self_multihead_attn_bias_blaslt is not None and not incremental and len_q <= 2048 \
and inputs.type() == 'torch.cuda.HalfTensor' \
and not rotary_pos_enc:
ctx.fused = True
if mask is not None:
if use_time_mask:
mask = mask.bool()
else: # [b x len_k] -> [b x 1 x 1 x len_k]
mask = mask.unsqueeze(1).unsqueeze(2).bool()
else:
if use_time_mask:
mask = inputs.new(len_q, len_q).zero_().bool()
else:
mask = inputs.new(bsz, 1, 1, len_q).zero_().bool() # works
cuda_module = self_multihead_attn_bias_blaslt
input_lin_results, \
attn_scores, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = cuda_module.forward(use_time_mask, is_training, heads,
inputs.contiguous(), input_weights, output_weights,
input_biases, output_biases,
mask, attn_bias, dropout_prob)
if recompute:
matmul2_results, dropout_results, attn_scores, input_lin_results = None, None, None, None
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
attn_scores,
input_lin_results,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
dropout_mask,
dropout_prob_t,
mask)
            if return_coverage:
                return outputs, dropout_results
            else:
                return (outputs,)
ctx.fused = False
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
input_lin_results = torch.addmm(input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1., alpha=1.)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
values = values.contiguous().view(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
len_k = keys.size(0)
# apply rotary position encodings
if rotary_pos_enc:
            assert pos_emb is not None
cos, sin = pos_emb
queries_, keys_ = apply_rotary_pos_emb(queries, keys, cos, sin)
queries.copy_(queries_)
keys.copy_(keys_)
else:
sin, cos = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
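        # i.e. per (batch*head) slice: scores = attn_bias + (head_dim ** -0.5) * Q @ K^T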
if queries.is_cuda:
            # clone the bias so the batched GEMM can accumulate into it in place
            matmul1_results = attn_bias.clone()
            matmul1_results.baddbmm_(queries.transpose(0, 1),
                                     keys.transpose(0, 1).transpose(1, 2),
                                     beta=1.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
matmul1_results.add_(attn_bias)
# [B*H x T x T]
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
# Softmax and Dropout attention
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
nan_mask = torch.isnan(dropout_results)
if nan_mask.any():
dropout_results.masked_fill_(nan_mask, 0)
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=queries.device).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1),
inputs.size(2))
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
outputs = torch.addmm(output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
del attn_bias
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
dropout_mask,
dropout_prob_t,
sin, cos)
if return_coverage:
return (outputs, dropout_results)
else:
return (outputs,)
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
if ctx.return_coverage:
output_grads, coverage_grads = output_grads
else:
output_grads = output_grads[0]
if ctx.fused:
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
attn_scores, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
mask, \
dropout_mask, \
dropout_prob_t, pad_mask = ctx.saved_tensors
if input_weights.requires_grad:
cuda_module = self_multihead_attn_bias_blaslt
if ctx.recompute:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads,\
attn_bias_grads = \
cuda_module.backward_recompute(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), inputs, input_weights,
output_weights, input_biases, output_biases,
mask, dropout_mask, dropout_prob_t[0])
else:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads,\
attn_bias_grads = \
cuda_module.backward(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), matmul2_results,
dropout_results, attn_scores,
input_lin_results, inputs, input_weights,
output_weights, dropout_mask, dropout_prob_t[0])
else:
input_grads = self_multihead_attn_cuda.backward_input_only(ctx.use_time_mask, heads_t[0],
output_grads.contiguous(), matmul2_results,
dropout_results, attn_scores,
input_lin_results, inputs, input_weights,
output_weights, dropout_mask,
dropout_prob_t[0])
input_weight_grads = None
input_bias_grads = None
output_weight_grads = None
output_bias_grads = None
return None, None, None, \
input_grads, attn_bias_grads, \
input_weight_grads, output_weight_grads, \
input_bias_grads, output_bias_grads, \
None, None, None, None, None, None, None, None, None
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
mask, \
dropout_mask, \
dropout_prob_t, \
sin, cos = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t.item()
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
len_key = keys.size(0)
# Slice out q,k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_grads = output_grads.contiguous()
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
if output_weights.requires_grad:
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
else:
output_weight_grads = None
output_bias_grads = None
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
        except TypeError:  # backward compatibility with older torch versions
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
grad_attn_bias = softmax_grads
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
if ctx.rotary_pos_enc:
queries_grads_ = queries_grads * cos + rotate_backward(sin * queries_grads)
keys_grads_ = keys_grads * cos + rotate_backward(sin * keys_grads)
queries_grads.copy_(queries_grads_)
keys_grads.copy_(keys_grads_)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(inputs.size(0) * inputs.size(1),
heads_t[0] * 3 * head_dim)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
if input_weights.requires_grad:
input_weight_grads = torch.mm(input_lin_results_grads.transpose(0, 1),
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)))
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_weight_grads = None
input_bias_grads = None
return None, None, None, \
input_grads, grad_attn_bias, \
input_weight_grads, output_weight_grads, \
input_bias_grads, output_bias_grads, \
None, None, None, None, None, None, None, None, None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def self_attn_bias_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return SelfAttnBiasFunc.apply(*args)
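# The wrapper above implements the usual "cast once, then disable autocast"
# pattern: arguments are cast to the autocast dtype up front so the custom
# autograd function runs with uniform dtypes. A minimal sketch of the same idea
# (SomeAutogradFunc is a hypothetical torch.autograd.Function):
#
#   def my_op(*args):
#       args = _cast_if_autocast_enabled(*args)       # half/bf16 if autocast is on
#       with torch.cuda.amp.autocast(enabled=False):  # freeze dtypes inside
#           return SomeAutogradFunc.apply(*args)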
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='gradcheck for the self-attention-bias function')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
    parser.add_argument('-gpu', default=0, type=int,
                        help="GPU device to run the test on.")
test_function = self_attn_bias_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super().__init__()
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.in_proj_weight = torch.Tensor(3 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias = torch.Tensor(3 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, input_weights, output_weights, input, input_biases, output_biases, mask, attn_bias,
use_time_mask=False):
is_training = True
dropout = 0.0
double_precision = True
return_coverage = False
# use_time_mask, is_training, heads, inputs, attn_bias,
# input_weights, output_weights,
# input_biases, output_biases,
# mask, dropout_prob,
# rotary_pos_enc, pos_emb,
# incremental, incremental_cache,
# low_precision, return_coverage, recompute
return self.function(use_time_mask, is_training, self.heads, input, attn_bias,
input_weights, output_weights,
input_biases, output_biases,
mask, dropout,
False, None, # For the incremental stuff
False, None,
                                 False, return_coverage, False)  # low precision off: gradcheck runs in double
bsz = 4
len_q = 15
len_r = len_q
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
attn_bias = torch.randn(*(bsz * opt.n_heads, len_q, len_q)).double().cuda()
input_states.requires_grad = True
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight = parameters.in_proj_weight.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
in_proj_bias = parameters.in_proj_bias.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
in_proj_weight.requires_grad = True
out_proj_weight.requires_grad = True
in_proj_bias.requires_grad = True
out_proj_bias.requires_grad = True
    mask = input_states.new(*(bsz, len_r)).fill_(0)  # all-zero key-padding mask
print("gradchecking start.")
use_time_mask = False
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, attn_bias, use_time_mask), atol=1e-03, rtol=0.001)
mask = input_states.new(*(len_q, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking with time mask start.")
use_time_mask = True
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, attn_bias, use_time_mask), atol=1e-03, rtol=0.001)
print("gradchecking completed.")
| 29,122
| 44.082043
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/test_rel_self_attention_func.py
|
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
import math
from self_attention_func import self_attn_func
from relative_self_attention_func import relative_self_attn_func
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz: integer to repeat
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in the "Attention Is All You Need" paper, sin comes first, then cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq.type_as(pos_seq))
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
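# Example (a sketch): relative positions from klen-1 down to -(klen-1) produce a
# [2*klen-1, 1, demb] embedding table, optionally repeated over the batch:
#
#   enc = SinusoidalPositionalEmbedding(demb=16)
#   pos = torch.arange(4, -5, -1.0)   # klen = 5
#   emb = enc(pos)                    # -> [9, 1, 16]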
class Parameters(torch.nn.Module):
    def __init__(self, model_size=16, heads=1):
        super(Parameters, self).__init__()
        self.model_size = model_size
        self.heads = heads
        self.head_dim = model_size // heads
self.in_proj_weight = Parameter(torch.Tensor(3 * model_size, model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size))
self.pos_proj_weight = Parameter(torch.Tensor(model_size, model_size))
self.in_proj_bias = Parameter(torch.Tensor(3 * model_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.pos_proj_bias = Parameter(torch.Tensor(model_size))
self.r_w_bias = nn.Parameter(torch.Tensor(self.heads, self.head_dim))
self.r_r_bias = nn.Parameter(torch.Tensor(self.heads, self.head_dim))
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.pos_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
nn.init.constant_(self.pos_proj_bias, 0.)
nn.init.normal_(self.r_w_bias, 0.0, 0.02)
nn.init.normal_(self.r_r_bias, 0.0, 0.02)
class RelSelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=23272123):
torch.cuda.set_device(0)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
self.seq_length = 256
self.sequences = 64
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.positional_encoder = SinusoidalPositionalEmbedding(self.hidden_dim)
embed_dim = self.hidden_dim
klen = self.seq_length
self.ref_parameters = Parameters(model_size=self.hidden_dim, heads=self.heads)
self.ref_parameters = self.ref_parameters.cuda().half()
self.tst_parameters = deepcopy(self.ref_parameters)
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
        # (seeding is disabled above; tst_inputs is copied from ref_inputs explicitly below)
self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
self.tst_inputs.data.copy_(self.ref_inputs.data)
x = self.ref_inputs
bsz = self.sequences
self.pos = torch.arange(klen - 1, -klen, -1.0, device=x.device, dtype=x.dtype)
self.pos_emb = self.positional_encoder(self.pos, bsz=bsz)
def test_input(self):
print("Checking if all inputs are the same ...")
self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight, self.tst_parameters.in_proj_weight,
atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.pos_proj_weight, self.tst_parameters.pos_proj_weight,
atol=1e-5, rtol=1e-5))
print("Done.")
def test_output(self):
print("Testing self-attention with random mask ....")
training = True
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
with torch.no_grad():
self.tst_inputs.copy_(self.ref_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
ref_output = relative_self_attn_func(self.ref_inputs, self.pos_emb,
False, training, self.heads,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.pos_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
self.ref_parameters.pos_proj_bias,
self.ref_parameters.r_w_bias,
self.ref_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
False, False, # low precision, learnable pos
False, False) # return coverage, recompute
tst_output = relative_self_attn_func(self.tst_inputs, self.pos_emb,
False, training, self.heads,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.pos_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
self.tst_parameters.pos_proj_bias,
self.tst_parameters.r_w_bias,
self.tst_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
True, False, # low precision, learnable pos
False, False) # return coverage, recompute
# print(ref_output - tst_output)
# np.testing.assert_allclose(
# ref_output.detach().cpu().numpy(),
# tst_output.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-3, rtol=1e-3))
grad_outputs_ref = torch.randn_like(tst_output)
grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
tst_output.data.copy_(ref_output.data)
ref_output.backward(grad_outputs_ref)
tst_output.backward(grad_outputs_tst)
# self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# self.tst_parameters.out_proj_weight.grad,
# atol=1e-3, rtol=1e-3))
# np.testing.assert_allclose(
# self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
# np.testing.assert_allclose(
# self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
#
# self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# self.tst_parameters.in_proj_weight.grad,
# atol=1e-2, rtol=1e-2))
np.testing.assert_allclose(
self.ref_parameters.r_w_bias.grad.detach().cpu().numpy(),
self.tst_parameters.r_w_bias.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
# pass matmul ac
np.testing.assert_allclose(
self.ref_parameters.r_r_bias.grad.detach().cpu().numpy(),
self.tst_parameters.r_r_bias.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_parameters.pos_proj_weight.grad.detach().cpu().numpy(),
self.tst_parameters.pos_proj_weight.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_parameters.pos_proj_bias.grad.detach().cpu().numpy(),
self.tst_parameters.pos_proj_bias.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# self.tst_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# atol=1e-3, rtol=1e-3))
#
np.testing.assert_allclose(
self.ref_inputs.grad.detach().cpu().numpy(),
self.tst_inputs.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
#
# def test_output_autoregressive(self):
#
# print("Testing self-attention with time mask ....")
# training = True
#
# self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
# dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
#
# with torch.no_grad():
# self.tst_inputs.copy_(self.ref_inputs)
#
# # mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
# mask = torch.triu(
# self.ref_inputs.new_ones(self.seq_length, self.seq_length), diagonal=1).bool()
#
# ref_output = relative_self_attn_func(self.ref_inputs, self.pos_emb,
# True, training, self.heads,
# self.ref_parameters.in_proj_weight,
# self.ref_parameters.out_proj_weight,
# self.ref_parameters.pos_proj_weight,
# self.ref_parameters.in_proj_bias,
# self.ref_parameters.out_proj_bias,
# self.ref_parameters.pos_proj_bias,
# self.ref_parameters.r_w_bias,
# self.ref_parameters.r_r_bias,
# mask, self.dropout_prob,
# False, None, # incremental and state
# False, False, # low precision, learnable pos
# False, False) # return coverage, recompute
#
# tst_output = relative_self_attn_func(self.tst_inputs, self.pos_emb,
# True, training, self.heads,
# self.tst_parameters.in_proj_weight,
# self.tst_parameters.out_proj_weight,
# self.tst_parameters.pos_proj_weight,
# self.tst_parameters.in_proj_bias,
# self.tst_parameters.out_proj_bias,
# self.tst_parameters.pos_proj_bias,
# self.tst_parameters.r_w_bias,
# self.tst_parameters.r_r_bias,
# mask, self.dropout_prob,
# False, None, # incremental and state
# True, False, # low precision, learnable pos
# False, False) # return coverage, recompute
#
# grad_outputs_ref = torch.randn_like(tst_output)
#
# grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
#
# tst_output.data.copy_(ref_output.data)
# ref_output.backward(grad_outputs_ref)
# tst_output.backward(grad_outputs_tst)
#
# # self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# # self.tst_parameters.out_proj_weight.grad,
# # atol=1e-3, rtol=1e-3))
#
# # np.testing.assert_allclose(
# # self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# # self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# # atol=1e-2, rtol=1e-2)
#
# # np.testing.assert_allclose(
# # self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# # self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# # atol=1e-2, rtol=1e-2)
#
# print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
# #
# # self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# # self.tst_parameters.in_proj_weight.grad,
# # atol=1e-2, rtol=1e-2))
# np.testing.assert_allclose(
# self.ref_parameters.r_w_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.r_w_bias.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# # pass matmul ac
#
# np.testing.assert_allclose(
# self.ref_parameters.r_r_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.r_r_bias.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# np.testing.assert_allclose(
# self.ref_parameters.pos_proj_weight.grad.detach().cpu().numpy(),
# self.tst_parameters.pos_proj_weight.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# np.testing.assert_allclose(
# self.ref_parameters.pos_proj_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.pos_proj_bias.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# # np.testing.assert_allclose(
# # self.ref_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# # self.tst_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# # atol=1e-3, rtol=1e-3)
#
# # np.testing.assert_allclose(
# # self.ref_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# # self.tst_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# # atol=1e-3, rtol=1e-3)
#
# # self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# # atol=1e-3, rtol=1e-3))
# #
# np.testing.assert_allclose(
# self.ref_inputs.grad.detach().cpu().numpy(),
# self.tst_inputs.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
def test_performance(self):
training = True
for dropout in [0.0, 0.5]:
mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
num_iters = 32
            # warm-up pass (outputs and timing discarded)
            torch.cuda.profiler.start()
            torch.cuda.synchronize()
            start_time = time()
for _ in range(num_iters):
ref_output = relative_self_attn_func(self.ref_inputs, self.pos_emb,
False, training, self.heads,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.pos_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
self.ref_parameters.pos_proj_bias,
self.ref_parameters.r_w_bias,
self.ref_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
False, False, # low precision, learnable pos
False, False) # return coverage, recompute
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
tst_output = relative_self_attn_func(self.tst_inputs, self.pos_emb,
False, training, self.heads,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.pos_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
self.tst_parameters.pos_proj_bias,
self.tst_parameters.r_w_bias,
self.tst_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
True, False, # low precision, learnable pos
False, False) # return coverage, recompute
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output = relative_self_attn_func(self.ref_inputs, self.pos_emb,
False, training, self.heads,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.pos_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
self.ref_parameters.pos_proj_bias,
self.ref_parameters.r_w_bias,
self.ref_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
False, False, # low precision, learnable pos
False, False) # return coverage, recompute
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
tst_output = relative_self_attn_func(self.tst_inputs, self.pos_emb,
False, training, self.heads,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.pos_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
self.tst_parameters.pos_proj_bias,
self.tst_parameters.r_w_bias,
self.tst_parameters.r_r_bias,
mask, self.dropout_prob,
False, None, # incremental and state
True, False, # low precision, learnable pos
False, False) # return coverage, recompute
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nCUDA Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()
| 23,988
| 49.932059
| 112
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/linear.py
|
import torch
from torch import Tensor
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import linear_blaslt
except (ModuleNotFoundError, ImportError) as e:
linear_blaslt = None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
class LinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias):
output = linear_blaslt.forward(input, weight, bias)
ctx.save_for_backward(input, weight)
return output
@staticmethod
def backward(ctx, grad_output):
input, weight = ctx.saved_tensors
if weight.requires_grad:
d_input, d_weight, d_bias = linear_blaslt.backward(input, weight, grad_output, True)
else:
d_input = linear_blaslt.backward_input_only(input, weight, grad_output)
d_weight, d_bias = None, None
return d_input, d_weight, d_bias
if linear_blaslt:
def linear_function(input, weight, bias):
if bias is None:
return torch.nn.functional.linear(input, weight, bias)
else:
_input, _weight, _bias = _cast_if_autocast_enabled(input, weight, bias)
with torch.cuda.amp.autocast(enabled=False):
return LinearFunction.apply(_input, _weight, _bias)
else:
linear_function = torch.nn.functional.linear
class Linear(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super(Linear, self).__init__(*args, **kwargs)
def forward(self, input: Tensor) -> Tensor:
if input.is_cuda and linear_function is not None and self.bias is not None:
return linear_function(input, self.weight, self.bias)
else:
return torch.nn.functional.linear(input, self.weight, self.bias)
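# Usage sketch (illustrative, not part of the original module): Linear is a
# drop-in replacement for torch.nn.Linear that dispatches CUDA inputs with a
# bias to the fused linear_blaslt kernel when that extension is built, and
# falls back to torch.nn.functional.linear otherwise.
def _linear_usage_sketch():
    layer = Linear(64, 64)
    x = torch.randn(8, 4, 64)
    if torch.cuda.is_available():
        layer, x = layer.cuda().half(), x.cuda().half()
    return layer(x)  # linear_blaslt.forward on CUDA if available, else F.linear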
def factorize_linear(input, weight, bias, rm, sm):
    # here we assume that rm and sm have size [rank x D]
    # TODO: manually cast tensors to fp16 because autocast does not cast them for these elementwise multiplications
if torch.is_autocast_enabled():
input = input.half()
rm = rm.half()
sm = sm.half()
weight = weight.half()
bias = bias.half()
if input.ndim == 3:
# assuming input size is [T x B x D]
bsz, qlen = input.size(1), input.size(0)
if rm.ndim == 2:
rank = rm.size(0)
rm = rm.unsqueeze(1).unsqueeze(2)
sm = sm.unsqueeze(1).unsqueeze(2)
h = input.unsqueeze(0) * sm
if rank == 1:
h = h.squeeze(0)
else:
h = h.sum(dim=0)
h = torch.mm(h.view(qlen * bsz, -1), weight.transpose(0, 1))
h = h.view(qlen, bsz, -1).unsqueeze(0) * rm
if rank == 1:
h = h.squeeze(0)
else:
h = h.sum(dim=0)
elif rm.ndim == 4:
rank = rm.size(2) # [T x B x R x D]
# W(sm * x)
h = input.unsqueeze(2) * sm
if rank == 1:
h = h.squeeze(2)
else:
h = h.sum(dim=2)
# W(sm * x)
h = torch.mm(h.view(qlen * bsz, -1), weight.transpose(0, 1))
# W(sm * x) * rm
h = h.view(qlen, bsz, -1).unsqueeze(2) * rm
if rank == 1:
h = h.squeeze(2)
else:
h = h.sum(dim=2)
# adding final bias (from normal linear)
h = h + bias.unsqueeze(0).unsqueeze(1)
return h
elif input.ndim == 2:
total_bsz = input.size(0)
if rm.ndim == 2:
rank = rm.size(0)
rm = rm.unsqueeze(1)
sm = sm.unsqueeze(1)
h = input.unsqueeze(0) * sm
if rank == 1:
h = h.squeeze(0)
else:
h = h.sum(dim=0)
h = torch.mm(h, weight.transpose(0, 1))
h = h.unsqueeze(0) * rm
if rank == 1:
h = h.squeeze(0)
else:
h = h.sum(dim=0)
elif rm.ndim == 3: # B x R x D
rank = rm.size(1)
h = input.unsqueeze(1) * sm
if rank == 1:
h = h.squeeze(1)
else:
                h = h.sum(dim=1)  # sum over the rank dimension (dim=1 for [B x R x D])
h = torch.mm(h, weight.transpose(0, 1))
h = h.unsqueeze(1) * rm
if rank == 1:
h = h.squeeze(1)
else:
h = h.sum(dim=1)
else:
print("factorized matrix dimension has to be either 2 or 3, get", rm.ndim)
raise NotImplementedError
h = h + bias.unsqueeze(0)
return h
else:
raise NotImplementedError
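# Illustrative check (not part of the original file): at rank 1,
# factorize_linear reduces to elementwise input/output scaling around a shared
# weight, out = rm * ((sm * x) @ W^T) + b. A minimal sketch on toy shapes:
def _factorize_linear_rank1_sketch():
    torch.manual_seed(0)
    x = torch.randn(5, 3, 8)  # [T x B x D]
    w, b = torch.randn(8, 8), torch.randn(8)
    rm, sm = torch.randn(1, 8), torch.randn(1, 8)  # [rank x D] with rank == 1
    out = factorize_linear(x, w, b, rm, sm)
    ref = ((x * sm.squeeze(0)) @ w.t()) * rm.squeeze(0) + b
    return torch.allclose(out, ref, atol=1e-5)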
| 5,012
| 25.109375
| 96
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/relative_self_attention_func.py
|
"""
Self-attention with relative position encoding and multi-head attention.
Code is heavily adapted from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .compat import custom_fwd, custom_bwd
try:
import relative_self_attn_blaslt
except (ModuleNotFoundError, ImportError) as e:
relative_self_attn_blaslt = None
try:
import linear_blaslt
except (ModuleNotFoundError, ImportError) as e:
linear_blaslt = None
class RelativeShift(object):
@staticmethod
def forward(x, batch_first, emb_last):
        assert len(x.shape) == 3, "Input must have 3 dimensions: B x len_q x len_r or len_q x len_r x d_emb!"
assert (batch_first or emb_last) and not (batch_first and emb_last), \
"Batch first and Embedding last must be mutually exclusive"
if batch_first:
bsz = x.size(0)
zero_pad = torch.zeros((bsz, x.size(1), 1),
device=x.device, dtype=x.dtype)
            # pad into [B, len_q, len_r + 1]
            x_padded = torch.cat([zero_pad, x], dim=2)
            # view as [B, len_r + 1, len_q]
            x_view = x_padded.view(bsz, x.size(2) + 1, x.size(1))
            # drop the first row of the reshaped view to realign relative positions
x = x_view[:, 1:].view_as(x)
else:
raise NotImplementedError
return x
@staticmethod
def backward(grad_x, batch_first, emb_last):
if batch_first:
# Refer to the variables in the forward to track the gradients
bsz = grad_x.size(0)
len_q, len_r = grad_x.size(1), grad_x.size(2)
grad_x_view = grad_x.view(bsz, len_r, len_q)
zero_pad = torch.zeros((bsz, 1, len_q), device=grad_x.device, dtype=grad_x.dtype)
# grad_x should have size B x len_q x len_r
            # grad_x_view (after padding) should have size B x len_r+1 x len_q
# put the zeros into the missing gradients
grad_x_view = torch.cat([zero_pad, grad_x_view], dim=1)
grad_x_padded = grad_x_view.view(bsz, len_q, len_r + 1)
# because the first index in the padded dim was from zero_pad
grad_output = grad_x_padded[:, :, 1:]
else:
raise NotImplementedError
return grad_output
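# Illustrative reference (not part of the original file): the pad-and-reshape
# trick above shifts row q of a [B, len_q, len_r] score tensor left by
# (len_q - 1 - q) positions, so column k lines up with relative offset
# k - q + (len_q - 1). The direct (slower) equivalent below matches
# RelativeShift.forward on the first len_r - (len_q - 1) columns of every row;
# entries beyond that are junk in the fast version and are expected to be
# truncated to len_k or masked by the caller.
def _relative_shift_reference(x):
    bsz, len_q, len_r = x.shape
    out = torch.zeros_like(x)
    for q in range(len_q):
        shift = len_q - 1 - q
        out[:, q, :len_r - shift] = x[:, q, shift:]
    return out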
class RelativeSelfAttnFunc(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, inputs, pos, use_time_mask, is_training, heads,
input_weights, output_weights, pos_weights,
input_biases, output_biases, pos_biases,
r_w_bias, r_r_bias,
mask, dropout_prob,
incremental, incremental_cache,
low_precision, learnable_pos, return_coverage, recompute):
"""
:param recompute:
:param return_coverage:
:param learnable_pos:
        :param low_precision: use the optimized low-precision path; set False to keep float32/float64 ops (e.g. for gradcheck/debugging)
:param ctx: context object to stash information for backward
:param inputs: input hidden states [len_q x batch_size x hidden]
        :param pos: relative position representations [len_r x 1 x hidden] (or [len_q x len_k x head_dim] when learnable_pos)
:param use_time_mask: bool, if we use the causal mask for decoder
:param is_training: training state, for dropout
:param heads: number of heads
        :param input_weights: input projection weight [3*hidden x hidden]
        :param output_weights: output projection weight [hidden x hidden]
        :param input_biases: input projection bias [3*hidden]
        :param output_biases: output projection bias [hidden]
:param pos_biases:
:param pos_weights:
:param r_w_bias:
:param r_r_bias:
:param mask: None or [B x T] or [T x T]
:param dropout_prob:
:param incremental:
:param incremental_cache:
:return:
"""
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([]).to(inputs.device)
head_dim = inputs.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
ctx.learnable_pos = learnable_pos
ctx.return_coverage = return_coverage
ctx.fused_all = False
ctx.recompute = recompute
ctx.use_time_mask = use_time_mask
bsz, len_q = inputs.size(1), inputs.size(0)
        len_r = pos.size(0)  # r can be longer than query, i.e. for bidirectional attention we need 2k+1 positions
len_k = len_q # because of self-attention
if mask is not None:
mask = mask.to(torch.bool)
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
# assert (mask.size(0) == mask.size(1)), "Sequence length should match!"
# mask = mask.unsqueeze(0).unsqueeze(0)
# Key Padding Mask
else:
# attn_score = attn_score.view(bsz, heads, len_q, len_k)
mask = mask.unsqueeze(1).unsqueeze(2)
if pos.size(1) == 1 and not learnable_pos:
pos = pos.repeat(1, bsz, 1) # we have to use repeat instead of expand here because mm needs contiguous
# Input Linear GEMM
# input1: (activations) [len_q, bsz, hidden]
# input2: (weights) [hidden*3 (3072), hidden (1024)] (transpose [0,1])
# output: [len_q, bsz, hidden*3]
# GEMM: ( (len_q*bsz) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (len_q*bsz x embed_dim*3)
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
input_lin_results = linear_blaslt.forward(inputs, input_weights, input_biases)
else:
input_lin_results = torch.addmm(input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1., alpha=1.)
# reshape [len_q*bsz, embed_dim*3 -> len_q x bsz x embed_dim*3]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
if not learnable_pos:
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
pos_lin_results = linear_blaslt.forward(pos, pos_weights, pos_biases)
else:
pos_lin_results = torch.addmm(pos_biases,
pos.view(pos.size(0) * pos.size(1), pos.size(2)),
pos_weights.transpose(0, 1),
beta=1., alpha=1.)
pos_lin_results = pos_lin_results.view(pos.size(0), pos.size(1), pos_weights.size(0))
r_head_k = pos_lin_results.view(pos.size(0), bsz * heads, head_dim) # T x BxH x D
else:
# the position embedding matrix is multiplied directly with queries + w_bias
pos_lin_results = None
r_head_k = None
# Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
# input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
# We have to change the heads x head_dim first and then concat to the T dim
# bsz is changed during translation due to beam search
# during translation we want to keep the actual T dim in MM as 1 constantly
keys = keys.reshape(len_q, bsz, heads * head_dim)
values = values.reshape(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
# re-update len_k to be the newly updated length of the keys
len_k = keys.size(0)
# Relative Attention from here:
# r_w_bias size: head * head_dim
rw_head_q = queries.view(len_q, bsz, heads, head_dim) + r_w_bias #
rw_head_q = rw_head_q.view(len_q, bsz * heads, head_dim)
rr_head_q = queries.view(len_q, bsz, heads, head_dim) + r_r_bias
rr_head_q = rr_head_q.view(len_q, bsz * heads, head_dim)
# matmul_ac batched GEMMs
# queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
# keys: [len_k, bsz*heads, head_dim] transpose(0, 1)
if queries.is_cuda:
matmul_ac = torch.empty((bsz * heads, queries.size(0), keys.size(0)), dtype=queries.dtype,
device=rw_head_q.device)
matmul_ac = torch.baddbmm(matmul_ac, rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2),
out=matmul_ac, beta=0.0, alpha=scale_t[0])
else:
matmul_ac = torch.bmm(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2)).mul_(scale_t[0])
if not learnable_pos:
if queries.is_cuda:
# matmul2 batched GEMMs
# queries+bias: [len_q, bsz*heads, head_dim] transpose(0, 1)
# rel_positions: [len_r, bsz*heads, head_dim] transpose(0, 1)
matmul_bd = torch.empty((bsz * heads, queries.size(0), len_r), dtype=queries.dtype,
device=rw_head_q.device)
matmul_bd = torch.baddbmm(matmul_bd, rr_head_q.transpose(0, 1),
r_head_k.transpose(0, 1).transpose(1, 2),
out=matmul_bd, beta=0.0, alpha=scale_t[0])
else:
matmul_bd = torch.matmul(rr_head_q.transpose(0, 1), r_head_k.transpose(0, 1).transpose(1, 2)) \
.mul_(scale_t[0])
# shift so that the relative positions are aligned
# the first element will have 0 q-1 ... -n relative positions compared to other elements
# the last element will have n-1 n-2 ... 0
matmul_bd = RelativeShift.forward(matmul_bd, True, False)
# if len_r is longer than len_k, then we need to take the first len_k positions only
matmul_bd = matmul_bd[:, :, :len_k]
attn_score = matmul_ac + matmul_bd # both AC and BD are scaled with scale_t before in baddbmm
else:
# matmul2 batched GEMMs
# queries+bias: [len_q, bsz*heads, head_dim]
# rel_positions: [len_q, len_k, head_dim] transpose(1, 2)
# add directly into matmul_ac so we don't need to
# torch.baddbmm(matmul_ac.transpose(0, 1), rr_head_q, pos.transpose(1, 2),
# out=matmul_ac.transpose(0, 1), beta=1.0, alpha=scale_t[0])
matmul_ac.transpose(0, 1).baddbmm_(rr_head_q, pos.transpose(1, 2), beta=1.0, alpha=scale_t[0])
attn_score = matmul_ac
# no need to shift in this case
# attn_score should have size [bsz*heads, len_q, len_k] for now
if mask is not None:
attn_score.view(bsz, heads, len_q, len_k).masked_fill_(mask, float('-inf'))
dtype_ = torch.float64 if attn_score.dtype == torch.float64 else torch.float32
softmax_results = F.softmax(attn_score, dim=-1, dtype=dtype_).type_as(attn_score)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
# Dropout - is not executed for inference
if is_training and dropout_prob_t[0] > 0:
            dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))  # p is the keep probability
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# Input1: from_softmax [bsz*heads, len_q, seql_k]
# Input2: (values) [seql_v, bsz*heads, head_dim] transpose(0,1)
# Output: [bsz*heads, len_q, head_dim]
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = (len_q x head_dim)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1)).transpose(0, 1)
matmul2_results = matmul2_results.contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
# Output Linear GEMM
# Input1: (activations) [len_q, bsz, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ len_q, bsz, embed_dim ]
# GEMM: ( len_q*bsz x embed_dim ) x ( embed_dim x embed_dim ) = ( len_q*bsz x embed_dim )
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
outputs = linear_blaslt.forward(matmul2_results, output_weights, output_biases)
else:
outputs = torch.addmm(output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0)).contiguous()
if recompute:
ctx.save_for_backward(heads_t,
scale_t,
inputs, pos, r_head_k,
input_weights, pos_weights, output_weights,
input_biases, pos_biases, output_biases,
r_w_bias, r_r_bias,
dropout_mask, nan_mask, mask,
dropout_prob_t)
# delete stuff here
del input_lin_results, queries, keys, values
del matmul_ac, matmul2_results, attn_score, softmax_results, dropout_results
del rr_head_q, rw_head_q
if not learnable_pos:
del matmul_bd
dropout_results = null_tensor
else:
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
pos_lin_results,
# rw_head_q, rr_head_q,
                                  r_w_bias, r_r_bias,  # order must match the unpacking in backward
inputs, pos, r_head_k,
input_weights, pos_weights,
output_weights,
dropout_mask, nan_mask,
dropout_prob_t)
del attn_score
if return_coverage:
return (outputs, dropout_results)
else:
return outputs
@staticmethod
@custom_bwd
def backward(ctx, *output_grads):
"""
:param ctx:
:param output_grads: gradients w.r.t the outputs
:return:
"""
if not ctx.recompute:
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, pos_lin_results, \
r_w_bias, r_r_bias, \
inputs, pos, r_head_k, \
input_weights, pos_weights, \
output_weights, \
dropout_mask, nan_mask, \
dropout_prob_t = ctx.saved_tensors
else:
heads_t, \
scale_t, \
inputs, pos, r_head_k, \
input_weights, pos_weights, output_weights, \
input_biases, pos_biases, output_biases, \
r_w_bias, r_r_bias, \
dropout_mask, nan_mask, pad_mask, \
dropout_prob_t = ctx.saved_tensors
input_lin_results, matmul2_results, \
dropout_results, softmax_results, pos_lin_results = None, None, None, None, None
rw_head_q = None
rr_head_q = None
learnable_pos = ctx.learnable_pos
if ctx.return_coverage:
output_grads, softmax_grads = output_grads
else:
output_grads = output_grads[0]
output_grads = output_grads.contiguous()
head_dim = inputs.size(2) // heads_t[0]
len_q, bsz = inputs.size(0), inputs.size(1)
len_k = len_q
len_r = pos.size(0)
if ctx.fused_all: # only applicable for learnable position and len_k <= 2048
# softmax results -> attn scores
# rw_head_q -> r_w_bias
# rr_head_q -> r_r_bias
input_grads, \
input_weights_grads, \
pos_weights_grads, \
output_weights_grads, \
input_biases_grads, \
pos_biases_grads, \
output_biases_grads, \
r_w_bias_grads, r_r_bias_grads = relative_self_attn_blaslt.backward(
heads_t[0], output_grads, matmul2_results,
dropout_results, softmax_results,
input_lin_results, pos_lin_results,
rw_head_q, rr_head_q,
inputs, pos,
input_weights, output_weights, pos_weights,
dropout_mask, dropout_prob_t[0])
# pos_weight_grads = None
# pos_bias_grads = None
pos_grads = None
del ctx.fused_all, ctx.recompute, ctx.return_coverage
return input_grads, pos_grads, None, None, None, input_weights_grads, \
output_weights_grads, pos_weights_grads, \
input_biases_grads, output_biases_grads, pos_biases_grads, r_w_bias_grads, r_r_bias_grads, \
None, None, None, None, None, None, None, None
if ctx.recompute:
# RECOMPUTE STARTS HERE
heads = heads_t[0]
# Recomputing the activations in the forward pass here
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
input_lin_results = linear_blaslt.forward(inputs, input_weights, input_biases)
else:
input_lin_results = torch.addmm(input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1., alpha=1.)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
if not learnable_pos:
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
pos_lin_results = linear_blaslt.forward(pos, pos_weights, pos_biases)
else:
pos_lin_results = torch.addmm(pos_biases,
pos.view(pos.size(0) * pos.size(1), pos.size(2)),
pos_weights.transpose(0, 1),
beta=1., alpha=1.)
pos_lin_results = pos_lin_results.view(pos.size(0), pos.size(1), pos_weights.size(0))
r_head_k = pos_lin_results.view(pos.size(0), bsz * heads, head_dim) # T x BxH x D
else:
# pos_lin_results = pos.view(pos.size(0), bsz * heads, head_dim) # T x BxH x D
# r_head_k = pos_lin_results
pos_lin_results = None
r_head_k = None
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
rw_head_q = queries.view(len_q, bsz, heads, head_dim) + r_w_bias #
rw_head_q = rw_head_q.view(len_q, bsz * heads, head_dim)
matmul_ac = torch.empty((bsz * heads, queries.size(0), keys.size(0)), dtype=queries.dtype,
device=rw_head_q.device)
matmul_ac.baddbmm_(rw_head_q.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2),
beta=0.0, alpha=scale_t[0])
rr_head_q = queries.view(len_q, bsz, heads, head_dim) + r_r_bias
rr_head_q = rr_head_q.view(len_q, bsz * heads, head_dim)
if not learnable_pos:
matmul_bd = torch.empty((bsz * heads, queries.size(0), len_r), dtype=queries.dtype,
device=rw_head_q.device)
matmul_bd.baddbmm_(rr_head_q.transpose(0, 1),
r_head_k.transpose(0, 1).transpose(1, 2),
beta=0.0, alpha=scale_t[0])
matmul_bd = RelativeShift.forward(matmul_bd, True, False)
matmul_bd = matmul_bd[:, :, :len_k]
attn_score = matmul_ac + matmul_bd
else:
matmul_ac.transpose(0, 1).baddbmm_(rr_head_q, pos.transpose(1, 2), beta=1.0, alpha=scale_t[0])
attn_score = matmul_ac
if pad_mask is not None:
attn_score.view(bsz, heads, len_q, len_k).masked_fill_(pad_mask, float('-inf'))
dtype_ = torch.float64 if attn_score.dtype == torch.float64 else torch.float32
softmax_results = F.softmax(attn_score, dim=-1, dtype=dtype_).type_as(attn_score)
nan_mask = torch.isnan(softmax_results)
if nan_mask.any():
softmax_results.masked_fill_(nan_mask, 0)
del attn_score
if dropout_prob_t[0] > 0:
pinv = 1.0 / (1.0 - dropout_prob_t[0])
dropout_results = softmax_results * dropout_mask * pinv
else:
dropout_results = softmax_results
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1)).transpose(0, 1)
matmul2_results = matmul2_results.contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
# BACKWARD PASS STARTS HERE
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# input_lin_results: [len_q, bsz, heads(16), 3, head_dim(64)]
# input_lin_results: [len_q, batches=bsz*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
rw_head_q = queries.view(len_q, bsz, heads_t[0], head_dim) + r_w_bias #
rw_head_q = rw_head_q.view(len_q, bsz * heads_t[0], head_dim)
rr_head_q = queries.view(len_q, bsz, heads_t[0], head_dim) + r_r_bias
rr_head_q = rr_head_q.view(len_q, bsz * heads_t[0], head_dim)
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [len_q, bsz, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ len_q, bsz, embed_dim ]
# GEMM: ( len_q*bsz x embed_dim ) x ( embed_dim x embed_dim ) = ( len_q*bsz x embed_dim )
# Output Linear GEMM - WGRAD
# Input1: (data grads) [len_q*bsz, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [len_q*bsz, embed_dim ]
# Output: [ len_q, bsz, embed_dim ]
# GEMM: ( embed_dim x len_q*bsz ) x ( len_q*bsz x embed_dim ) = ( embed_dim x embed_dim )
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
# pos_lin_results = linear_blaslt.forward(pos, pos_weights, pos_biases)
output_lin_grads, output_weight_grads, output_bias_grads = \
linear_blaslt.backward(matmul2_results, output_weights, output_grads, True)
else:
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
# Matmul2 - DGRAD1
# Input1: (data grads) [bsz*heads, len_q, head_dim]
# Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [bsz*heads, len_q, seql_k]
# GEMM: Per batch: ( len_q x head_dim ) x ( head_dim x seql_k ) = ( len_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input2: (data grads) [bsz*heads, len_q, head_dim]
# Input1: (activations) [bsz*heads, len_q, len_k] transpose(1,2)
# Output: [bsz*heads, len_k, head_dim]
# GEMM: Per batch: ( len_k x len_q ) x ( len_q x head_dim ) = ( len_k x head_dim )
torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Input1: (data grads) [bsz*heads, len_q, head_dim].transpose(0, 1)
# Input2: (rpositions) [len_q, len_k, head_dim].transpose(1,2)
# Output: [bsz*heads, len_q, seql_k].transpose(0, 1)
# torch.baddbmm(matmul2_dgrad1.transpose(0, 1), output_lin_grads.transpose(0, 1), pos.transpose(1, 2),
# beta=1.0, alpha=1.0, out=matmul2_dgrad1.transpose(0, 1))
# Input2: (data grads) [bsz*heads, len_q, head_dim].transpose(0, 1)
# Input1: (activations) [bsz*heads, len_q, len_k] transpose(0,1).transpose(1,2)
# Output: [len_q, len_k, head_dim]
# pos_grads = torch.bmm(dropout_results.transpose(0, 1).transpose(1, 2), output_lin_grads.transpose(0, 1))
if dropout_prob_t[0] > 0.0:
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
else:
dropout_grads = matmul2_dgrad1
        # Softmax Grad (not a publicly documented op)
try:
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results.dtype)
except TypeError:
            # older PyTorch versions expect a tensor instead of a dtype as the last argument
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
attn_score_grads = softmax_grads
# the grads are evenly distributed to AC and BD
matmul_ac_grads = attn_score_grads
# Matmul1 - DGRAD1
# Input1: (data grads) [bsz*heads, len_q, seql_k]
# Input2: (activations) [seql_k, bsz*heads, head_dim] transpose(0,1)
# Output: [bsz*heads, len_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), matmul_ac_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
queries_grads_ac = queries_grads
r_w_bias_grads = torch.sum(queries_grads_ac.view(len_q, bsz, heads_t[0], -1), dim=[0, 1]) # heads * head_dim
matmul_bd_grads = attn_score_grads
if not learnable_pos:
if len_r > len_q: # if we cut off the BDs from before, then put the zero gradients at the back
grad_cut = matmul_bd_grads.new_zeros((matmul_bd_grads.size(0), matmul_bd_grads.size(1), len_r - len_q))
matmul_bd_grads = torch.cat([matmul_bd_grads, grad_cut], dim=-1)
# backprop through the shifting
matmul_bd_grads = RelativeShift.backward(matmul_bd_grads, True, False)
# MatmulBD - DGRAD1
# Input1: (matmul_bd_grads) [bsz*heads, len_q, seql_k]
# Input2: (r_head_k) [len_q, bsz*heads, head_dim] transpose(0,1)
# Output: [bsz*heads, len_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( len_q x seql_k ) x ( seql_k x head_dim ) = ( len_q x head_dim )
queries_grads_bd = queries_grads.new_empty(*queries_grads.size())
torch.baddbmm(queries_grads_bd.transpose(0, 1), matmul_bd_grads, r_head_k.transpose(0, 1),
out=queries_grads_bd.transpose(0, 1), beta=0.0, alpha=scale_t[0])
else:
# MatmulBD - DGRAD1
# Input1: (matmul_bd_grads) [bsz*heads, len_q, len_k] transpose(0,1)
# Input2: (pos) [len_q, len_k, head_dim]
# Output: [len_q, bsz*heads, head_dim]
# GEMM: Per batch: ( bsz*heads x len_k ) x ( len_k x head_dim ) = ( bsz*heads x head_dim )
queries_grads_bd = queries_grads.new_empty(*queries_grads.size())
torch.baddbmm(queries_grads_bd, matmul_bd_grads.transpose(0, 1), pos,
out=queries_grads_bd, beta=0.0, alpha=scale_t[0])
# len_q x batch*heads x d_head
r_r_bias_grads = torch.sum(queries_grads_bd.view(len_q, bsz, heads_t[0], -1), dim=[0, 1])
# add the gradients from bd to queries
queries_grads.add_(queries_grads_bd)
        # MatmulAC - DGRAD2
# Input1: (data grads) [bsz*heads, len_q, seql_k] transpose(1,2)
# Input2: (rw_head_q) [bsz*heads, head_dim, len_q] transpose(0,1)
# Output: [seql_k, bsz*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), matmul_ac_grads.transpose(1, 2),
rw_head_q.transpose(0, 1), out=keys_grads.transpose(0, 1),
beta=0.0, alpha=scale_t[0])
if not learnable_pos:
# MatmulBD - DGRAD2
# Input1: (data grads) [bsz*heads, len_q, len_r] transpose(1,2)
# Input2: (rr_head_q) [len_q, bsz*heads, head_dim] transpose(0,1)
# Output: r_head_k [len_r, bsz*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x len_q ) x ( len_q x head_dim ) = ( seql_k x head_dim )
r_head_k_grad = r_head_k.new_empty((len_r, bsz * heads_t[0], head_dim))
torch.baddbmm(r_head_k_grad.transpose(0, 1), matmul_bd_grads.transpose(1, 2).contiguous(),
rr_head_q.transpose(0, 1), out=r_head_k_grad.transpose(0, 1), beta=0.0, alpha=scale_t[0])
r_head_k_grad = r_head_k_grad.view(len_r, bsz, heads_t[0] * head_dim)
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
_, pos_weight_grads, pos_bias_grads = linear_blaslt.backward(pos, pos_weights, r_head_k_grad, False)
else:
pos_weight_grads = torch.mm(r_head_k_grad.view(len_r * bsz, heads_t[0] * head_dim).transpose(0, 1),
pos.view(pos.size(0) * pos.size(1), pos.size(2)))
pos_bias_grads = torch.sum(r_head_k_grad, [0, 1])
pos_grads = None
else:
pos_weight_grads, pos_bias_grads = None, None
pos_grads = torch.empty_like(pos)
# MatmulBD - DGRAD2
# Input1: (data grads) [bsz*heads, len_q, len_k] transpose(0,1),(1,2) -> [len_q, len_k, bsz*heads]
# Input2: (rr_head_q) [len_q, bsz*heads, head_dim]
# Output: pos_grads [len_q, len_k, head_dim]
            # GEMM: Per batch: ( len_k x bsz*heads ) x ( bsz*heads x head_dim ) = ( len_k x head_dim )
torch.baddbmm(pos_grads, matmul_bd_grads.transpose(0, 1).transpose(1, 2).contiguous(),
rr_head_q, out=pos_grads, beta=0.0, alpha=scale_t[0])
# Input Linear GEMM - DGRAD
# input1: (data grads) [len_q, bsz, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [len_q, bsz, embed_dim]
# GEMM: ( (len_q*bsz) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (len_q*bsz x embed_dim)
if linear_blaslt is not None and inputs.dtype != torch.float64 and inputs.is_cuda:
input_grads, input_weight_grads, input_bias_grads \
= linear_blaslt.backward(inputs, input_weights, input_lin_results_grads, True)
else:
input_lin_results_grads = input_lin_results_grads.view(inputs.size(0) * inputs.size(1),
heads_t[0] * 3 * head_dim)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
input_grads = input_grads.contiguous()
# Input Linear GEMM - WGRAD
# input1: (data grads) [len_q*bsz, 3*embed_dim(3072)]
# input2: (activations) [len_q*bsz, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x len_q*bsz ) x ( len_q*bsz x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(input_lin_results_grads.transpose(0, 1),
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)))
input_bias_grads = torch.sum(input_lin_results_grads, 0)
return input_grads, pos_grads, None, None, None, input_weight_grads, output_weight_grads, pos_weight_grads, \
input_bias_grads, output_bias_grads, pos_bias_grads, r_w_bias_grads, r_r_bias_grads, \
None, None, None, None, None, None, None, None
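# Illustrative sketch (not part of the original file): ignoring masking and
# dropout, the attention scores assembled in forward() follow the
# Transformer-XL decomposition
#     score = scale * [ (q + r_w_bias) K^T + rel_shift((q + r_r_bias) R^T) ]
# where R holds the projected relative-position embeddings. A toy per-head
# reference for the non-learnable-position case, assuming q, k of shape
# [len_q, d] and r of shape [len_r, d] with len_r >= len_q:
def _relative_score_sketch(q, k, r, r_w_bias, r_r_bias):
    scale = q.size(-1) ** -0.5
    ac = (q + r_w_bias) @ k.t()  # content-content term
    bd = (q + r_r_bias) @ r.t()  # content-position term, one row per query
    bd = RelativeShift.forward(bd.unsqueeze(0), True, False).squeeze(0)
    return scale * (ac + bd[:, :k.size(0)])  # keep the first len_k columns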
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def relative_self_attn_func(input, pos, use_mask, is_training, num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, dropout,
incremental, incremental_cache,
low_precision, learnable_pos, return_coverage, recompute):
input, pos, use_mask, is_training, num_heads, \
in_proj_weight, out_proj_weight, pos_proj_weight, \
in_proj_bias, out_proj_bias, pos_proj_bias, \
r_w_bias, r_r_bias, \
mask, dropout, \
incremental, incremental_cache, \
low_precision, learnable_pos, return_coverage, recompute = _cast_if_autocast_enabled(
input, pos, use_mask, is_training, num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, dropout,
incremental, incremental_cache,
low_precision, learnable_pos, return_coverage, recompute)
return RelativeSelfAttnFunc.apply(input, pos, use_mask, is_training, num_heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, dropout,
incremental, incremental_cache,
low_precision, learnable_pos, return_coverage, recompute)
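# Minimal call sketch (illustrative only; the shapes and all-zero biases below
# are assumptions, not values from the original code). It shows the argument
# layout of relative_self_attn_func for the non-incremental,
# non-learnable-position case:
def _relative_self_attn_usage_sketch():
    heads, d, len_q, bsz = 4, 32, 5, 2
    head_dim = d // heads
    x = torch.randn(len_q, bsz, d)          # [len_q x bsz x hidden]
    pos = torch.randn(2 * len_q - 1, 1, d)  # [len_r x 1 x hidden]
    in_w, out_w, pos_w = torch.randn(3 * d, d), torch.randn(d, d), torch.randn(d, d)
    in_b, out_b, pos_b = torch.zeros(3 * d), torch.zeros(d), torch.zeros(d)
    r_w_bias, r_r_bias = torch.zeros(heads, head_dim), torch.zeros(heads, head_dim)
    return relative_self_attn_func(x, pos, False, True, heads,
                                   in_w, out_w, pos_w,
                                   in_b, out_b, pos_b,
                                   r_w_bias, r_r_bias,
                                   None, 0.0,      # no mask, no dropout
                                   False, None,    # not incremental
                                   False, False,   # full precision, fixed positions
                                   False, False)   # no coverage, no recompute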
# TODO: write test function
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
test_function = relative_self_attn_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super(Parameters, self).__init__()
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
# self.function = RelativeShiftFunction.apply
self.in_proj_weight = torch.Tensor(3 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.pos_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias = torch.Tensor(3 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.pos_proj_bias = torch.Tensor(model_size)
self.r_w_bias = torch.Tensor(self.heads, self.head_dim)
self.r_r_bias = torch.Tensor(self.heads, self.head_dim)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.pos_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
torch.nn.init.constant_(self.pos_proj_bias, 0.)
torch.nn.init.normal_(self.r_w_bias, 0.0, std_)
torch.nn.init.normal_(self.r_r_bias, 0.0, std_)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, input, pos, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias,
mask, learnable_embedding=False, recompute=False):
use_time_mask = False
is_training = True
dropout = 0.0
low_precision = False
return_coverage = False
return self.function(input, pos, use_time_mask, is_training, self.heads,
in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias,
r_w_bias, r_r_bias,
mask, dropout,
False, None, # For the incremental stuff
low_precision, learnable_embedding,
                                 return_coverage, recompute)  # low_precision=False keeps full precision for gradcheck
bsz = 4
len_q = 5
len_r = 15
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
input_states.requires_grad = True
pos = torch.randn(*(len_r, 1, opt.model_size)).double().cuda()
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight = parameters.in_proj_weight.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
pos_proj_weight = parameters.pos_proj_weight.double().cuda()
in_proj_bias = parameters.in_proj_bias.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
pos_proj_bias = parameters.pos_proj_bias.double().cuda()
r_w_bias = parameters.r_w_bias.double().cuda()
r_r_bias = parameters.r_r_bias.double().cuda()
in_proj_weight.requires_grad = True
out_proj_weight.requires_grad = True
pos_proj_weight.requires_grad = True
in_proj_bias.requires_grad = True
out_proj_bias.requires_grad = True
pos_proj_bias.requires_grad = True
r_w_bias.requires_grad = True
r_r_bias.requires_grad = True
# mask = None # input_states.new(*(bsz, len_q)).fill_(0).bool()
mask = input_states.new(*(bsz, len_q)).bernoulli_(p=0.25).bool()
# mask.requires_grad = False
learnable_pe = False
print("gradchecking start.")
torch.autograd.gradcheck(net, (input_states, pos, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias,
mask, learnable_pe))
print("gradchecking completed.")
print("gradchecking w/ recompute start.")
recompute = True
torch.autograd.gradcheck(net, (input_states, pos, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias,
mask, learnable_pe, recompute))
print("gradchecking completed.")
pos = torch.randn(*(len_q, len_q, opt.head_dim)).double().cuda()
pos.requires_grad = True
learnable_pe = True
print("gradchecking w/ learnable position encodings start.")
torch.autograd.gradcheck(net, (input_states, pos, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias,
mask, learnable_pe),
eps=1e-6, atol=1e-5, rtol=1e-3)
print("gradchecking w/ learnable position encodings completed.")
print("gradchecking w/ learnable position encodings and recompute start.")
recompute = True
torch.autograd.gradcheck(net, (input_states, pos, in_proj_weight, out_proj_weight, pos_proj_weight,
in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias,
mask, learnable_pe, recompute),
eps=1e-6, atol=1e-5, rtol=1e-3)
print("gradchecking w/ learnable position encodings completed.")
| 43,985
| 46.862894
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/test_self_attention_func.py
|
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from self_attention_func import self_attn_func
class Parameters(torch.nn.Module):
    def __init__(self, model_size=16, heads=1):
        super(Parameters, self).__init__()
        self.model_size = model_size
        self.heads = heads
        self.head_dim = model_size // heads
self.in_proj_weight = Parameter(torch.Tensor(3 * model_size, model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size))
self.in_proj_bias = Parameter(torch.Tensor(3 * model_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=23272123):
torch.cuda.set_device(0)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
self.seq_length = 512
self.sequences = 64
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
embed_dim = self.hidden_dim
self.ref_parameters = Parameters(model_size=self.hidden_dim, heads=self.heads)
self.ref_parameters = self.ref_parameters.cuda().half()
self.tst_parameters = deepcopy(self.ref_parameters)
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
# Reset seed so parameters are identical
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
self.tst_inputs.data.copy_(self.ref_inputs.data)
def test_input(self):
print("Checking if all inputs are the same ...")
self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight, self.tst_parameters.in_proj_weight,
atol=1e-5, rtol=1e-5))
print("Done.")
# def test_output(self):
#
# print("Testing self-attention with random mask ....")
# training = True
#
# self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
# dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
#
# with torch.no_grad():
# self.tst_inputs.copy_(self.ref_inputs)
#
# mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
#
# ref_output, ref_coverage = self_attn_func(False, training, self.heads, self.ref_inputs,
# self.ref_parameters.in_proj_weight,
# self.ref_parameters.out_proj_weight,
# self.ref_parameters.in_proj_bias,
# self.ref_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# False, True)
#
# tst_output, tst_coverage = self_attn_func(False, training, self.heads, self.tst_inputs,
# self.tst_parameters.in_proj_weight,
# self.tst_parameters.out_proj_weight,
# self.tst_parameters.in_proj_bias,
# self.tst_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# True, True)
#
# self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-3, rtol=1e-3))
#
# grad_outputs_ref = torch.randn_like(tst_output)
#
# grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
#
# tst_output.data.copy_(ref_output.data)
# ref_output.backward(grad_outputs_ref)
# tst_output.backward(grad_outputs_tst)
#
# # self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# # self.tst_parameters.out_proj_weight.grad,
# # atol=1e-3, rtol=1e-3))
#
# np.testing.assert_allclose(
# self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# np.testing.assert_allclose(
# self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
# atol=1e-2, rtol=1e-2)
#
# print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
# #
# # self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# # self.tst_parameters.in_proj_weight.grad,
# # atol=1e-2, rtol=1e-2))
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# self.tst_parameters.in_proj_weight.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# self.tst_parameters.in_proj_bias.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# # self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# # atol=1e-3, rtol=1e-3))
# #
# np.testing.assert_allclose(
# self.ref_inputs.grad.detach().cpu().numpy(),
# self.tst_inputs.grad.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
# def test_output_autoregressive(self):
#
# print("Testing self-attention with time mask ....")
# training = True
#
# self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
# dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
#
# with torch.no_grad():
# self.tst_inputs.copy_(self.ref_inputs)
#
# # mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
# mask = torch.triu(
# self.ref_inputs.new_ones(self.seq_length, self.seq_length), diagonal=1).bool()
#
# ref_output, ref_coverage = self_attn_func(True, training, self.heads, self.ref_inputs,
# self.ref_parameters.in_proj_weight,
# self.ref_parameters.out_proj_weight,
# self.ref_parameters.in_proj_bias,
# self.ref_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# False, True)
#
# tst_output, tst_coverage = self_attn_func(True, training, self.heads, self.tst_inputs,
# self.tst_parameters.in_proj_weight,
# self.tst_parameters.out_proj_weight,
# self.tst_parameters.in_proj_bias,
# self.tst_parameters.out_proj_bias,
# mask, self.dropout_prob,
# False, None, False, None,
# True, True)
#
# self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-2, rtol=1e-2))
# grad_outputs_ref = torch.randn_like(tst_output)
#
# grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
#
# tst_output.data.copy_(ref_output.data)
# ref_output.backward(grad_outputs_ref)
# tst_output.backward(grad_outputs_tst)
#
# self.assertTrue(torch.allclose(self.ref_parameters.out_proj_weight.grad,
# self.tst_parameters.out_proj_weight.grad,
# atol=1e-1, rtol=1e-1))
#
# print("GRAD TEST", self.tst_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight.grad - self.tst_parameters.in_proj_weight.grad)
#
# # self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight.grad,
# # self.tst_parameters.in_proj_weight.grad,
# # atol=1e-2, rtol=1e-2))
# #
# np.testing.assert_allclose(
# self.ref_parameters.in_proj_weight.grad.data.cpu().numpy(),
# self.tst_parameters.in_proj_weight.grad.data.cpu().numpy(),
# atol=1e-3, rtol=1e-3)
#
# # self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad,
# # atol=1e-3, rtol=1e-3))
# #
# np.testing.assert_allclose(
# self.ref_inputs.detach().cpu().numpy(),
# self.tst_inputs.detach().cpu().numpy(),
# atol=1e-3, rtol=1e-3)
def test_performance(self):
training = True
for dropout in [0.0, 0.5]:
mask = ((torch.randn(self.sequences, self.seq_length) > 0)).bool().cuda()
num_iters = 32
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output, ref_coverage = self_attn_func(False, training, self.heads, self.ref_inputs,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
False, True, False)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
tst_output, tst_coverage = self_attn_func(False, training, self.heads, self.tst_inputs,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
True, True, False)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output, ref_coverage = self_attn_func(False, training, self.heads, self.ref_inputs,
self.ref_parameters.in_proj_weight,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
False, True, False)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
tst_output, tst_coverage = self_attn_func(False, training, self.heads, self.tst_inputs,
self.tst_parameters.in_proj_weight,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None, False, None,
True, True, False)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nCUDA Self-Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()
| 15,206
| 48.534202
| 111
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/softmax_xentropy.py
|
import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
losses.masked_fill_(labels==padding_idx, 0)
ctx.save_for_backward(logits, max_log_sum_exp, labels,
torch.FloatTensor([smoothing]),
torch.LongTensor([padding_idx]))
return losses
@staticmethod
def backward(ctx, grad_loss):
logits, max_log_sum_exp, labels, smoothing, padding_idx = ctx.saved_tensors
if not grad_loss.is_contiguous():
grad_loss = grad_loss.contiguous()
grad_loss.masked_fill_(labels==padding_idx.item(), 0)
grad_logits = xentropy_cuda.backward(
grad_loss.contiguous(), logits, max_log_sum_exp,
labels, smoothing.item())
return grad_logits, None, None, None, None
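# Usage sketch (illustrative; assumes the compiled xentropy_cuda extension and
# CUDA tensors). Computes per-token label-smoothed losses with the padding
# positions zeroed out; half_to_float=True returns fp32 losses for fp16 logits:
def _label_smoothed_xent_sketch(logits, labels, smoothing=0.1, padding_idx=0):
    half_to_float = logits.dtype == torch.float16
    return SoftmaxCrossEntropyLoss.apply(logits, labels, smoothing, padding_idx, half_to_float)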
| 1,023
| 34.310345
| 88
|
py
|
NMTGMinor
|
NMTGMinor-master/onmt/modules/optimized/test_encdec_attention_func.py
|
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from copy import deepcopy
from time import time
import unittest
import numpy as np
from encdec_attention_func_bias import encdec_attn_bias_func
class Parameters(torch.nn.Module):
    def __init__(self, model_size=16, heads=1):
        super(Parameters, self).__init__()
        self.model_size = model_size
        self.heads = heads
        self.head_dim = model_size // heads
self.in_proj_weight_q = Parameter(torch.Tensor(1 * model_size, model_size))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * model_size, model_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size))
self.in_proj_bias_q = Parameter(torch.Tensor(1 * model_size))
self.in_proj_bias_kv = Parameter(torch.Tensor(2 * model_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size))
self.reset_parameters()
def reset_parameters(self):
std_ = 0.002
torch.nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
torch.nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias_q, 0.)
torch.nn.init.constant_(self.in_proj_bias_kv, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class EncdecMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=8999):
torch.cuda.set_device(0)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length_q = 64
self.seq_length_kv = 512
self.sequences = 64
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
embed_dim = self.hidden_dim
self.ref_parameters = Parameters(model_size=self.hidden_dim, heads=self.heads)
self.ref_parameters = self.ref_parameters.cuda().half()
self.tst_parameters = deepcopy(self.ref_parameters)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.ref_inputs_q = torch.randn(self.seq_length_q, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
self.ref_inputs_kv = torch.randn(self.seq_length_kv, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_inputs_q = torch.randn(self.seq_length_q, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
self.tst_inputs_kv = torch.randn(self.seq_length_kv, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
def test_input(self):
print("Checking if all inputs are the same ...")
self.assertTrue(torch.allclose(self.ref_inputs_q, self.tst_inputs_q, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_inputs_kv, self.tst_inputs_kv, atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight_q, self.tst_parameters.in_proj_weight_q,
atol=1e-5, rtol=1e-5))
self.assertTrue(torch.allclose(self.ref_parameters.in_proj_weight_kv, self.tst_parameters.in_proj_weight_kv,
atol=1e-5, rtol=1e-5))
print("Done.")
def test_output(self):
training = True
mask = ((torch.randn(self.sequences, self.seq_length_kv) > 0)).bool().cuda()
ref_output, ref_coverage = encdec_attn_bias_func(False, training, self.heads,
self.ref_inputs_q, self.ref_inputs_kv,
self.ref_parameters.in_proj_weight_q,
self.ref_parameters.in_proj_weight_kv,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias_q,
self.ref_parameters.in_proj_bias_kv,
self.ref_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None,
False, None, None,
False, True)
tst_output, tst_coverage = encdec_attn_bias_func(False, training, self.heads,
self.tst_inputs_q, self.tst_inputs_kv,
self.tst_parameters.in_proj_weight_q,
self.tst_parameters.in_proj_weight_kv,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias_q,
self.tst_parameters.in_proj_bias_kv,
self.tst_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None,
False, None, None,
True, True)
        # compare outputs before tst_output is overwritten with ref_output's data
        self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-2, rtol=1e-2))
        grad_outputs_ref = torch.randn_like(tst_output)
        grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
        tst_output.data.copy_(ref_output.data)
        ref_output.backward(grad_outputs_ref)
        tst_output.backward(grad_outputs_tst)
np.testing.assert_allclose(
self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
#
# self.assertTrue(torch.allclose(self.ref_parameters.out_proj_bias.grad,
# self.tst_parameters.out_proj_bias.grad,
# atol=1e-2, rtol=1e-2))
# print("GRAD TEST", self.tst_parameters.in_proj_weight_kv.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight_kv.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight_kv.grad - self.tst_parameters.in_proj_weight_kv.grad)
np.testing.assert_allclose(
self.ref_parameters.in_proj_weight_kv.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_weight_kv.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.in_proj_bias_kv.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_bias_kv.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_parameters.in_proj_weight_q.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_weight_q.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.in_proj_bias_q.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_bias_q.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_inputs_q.grad.detach().cpu().numpy(),
self.tst_inputs_q.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_inputs_kv.grad.detach().cpu().numpy(),
self.tst_inputs_kv.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
def test_output_recompute(self):
training = True
recompute = True
mask = ((torch.randn(self.sequences, self.seq_length_kv) > 0)).bool().cuda()
ref_output, ref_coverage = encdec_attn_bias_func(False, training, self.heads,
self.ref_inputs_q, self.ref_inputs_kv,
self.ref_parameters.in_proj_weight_q,
self.ref_parameters.in_proj_weight_kv,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias_q,
self.ref_parameters.in_proj_bias_kv,
self.ref_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None,
False, None, None,
False, True)
tst_output, tst_coverage = encdec_attn_bias_func(True, training, self.heads,
self.tst_inputs_q, self.tst_inputs_kv,
self.tst_parameters.in_proj_weight_q,
self.tst_parameters.in_proj_weight_kv,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias_q,
self.tst_parameters.in_proj_bias_kv,
self.tst_parameters.out_proj_bias,
mask, self.dropout_prob,
False, None,
False, None, None,
True, True)
        # compare outputs before tst_output is overwritten with ref_output's data
        self.assertTrue(torch.allclose(ref_output, tst_output, atol=1e-2, rtol=1e-2))
        grad_outputs_ref = torch.randn_like(tst_output)
        grad_outputs_tst = torch.randn_like(tst_output).copy_(grad_outputs_ref)
        tst_output.data.copy_(ref_output.data)
        ref_output.backward(grad_outputs_ref)
        tst_output.backward(grad_outputs_tst)
np.testing.assert_allclose(
self.ref_parameters.out_proj_weight.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_weight.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.out_proj_bias.grad.detach().cpu().numpy(),
self.tst_parameters.out_proj_bias.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
# print("GRAD TEST", self.tst_parameters.in_proj_weight_kv.grad)
# print("GRAD TEST", self.ref_parameters.in_proj_weight_kv.grad)
print("GRAD TEST", self.ref_parameters.in_proj_weight_kv.grad - self.tst_parameters.in_proj_weight_kv.grad)
np.testing.assert_allclose(
self.ref_parameters.in_proj_weight_kv.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_weight_kv.grad.detach().cpu().numpy(),
atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
self.ref_parameters.in_proj_bias_kv.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_bias_kv.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.in_proj_weight_q.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_weight_q.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_parameters.in_proj_bias_q.grad.detach().cpu().numpy(),
self.tst_parameters.in_proj_bias_q.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_inputs_q.grad.detach().cpu().numpy(),
self.tst_inputs_q.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(
self.ref_inputs_kv.grad.detach().cpu().numpy(),
self.tst_inputs_kv.grad.detach().cpu().numpy(),
atol=1e-3, rtol=1e-3)

    def test_performance(self):
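        # Benchmark forward+backward latency of the PyTorch and fused CUDA
        # paths, each with and without activation recomputation, at dropout
        # 0.0 and 0.5.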
training = True
for dropout in [0.0, 0.5]:
            mask = (torch.randn(self.sequences, self.seq_length_kv) > 0).bool().cuda()
num_iters = 32
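            # warm-up: run each implementation once so CUDA kernels and
            # workspaces are initialized before timing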
torch.cuda.profiler.start()
torch.cuda.synchronize()
for _ in range(16):
tst_output, tst_coverage = encdec_attn_bias_func(False, training, self.heads,
self.tst_inputs_q, self.tst_inputs_kv,
self.tst_parameters.in_proj_weight_q,
self.tst_parameters.in_proj_weight_kv,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias_q,
self.tst_parameters.in_proj_bias_kv,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
True, True)
ref_output, ref_coverage = encdec_attn_bias_func(False, training, self.heads,
self.ref_inputs_q, self.ref_inputs_kv,
self.ref_parameters.in_proj_weight_q,
self.ref_parameters.in_proj_weight_kv,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias_q,
self.ref_parameters.in_proj_bias_kv,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
False, True)
grad_outputs_tst = torch.randn_like(tst_output)
grad_outputs_ref = torch.randn_like(ref_output)
tst_output.backward(grad_outputs_tst)
ref_output.backward(grad_outputs_ref)
self.tst_parameters.zero_grad()
self.ref_parameters.zero_grad()
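            # time the plain PyTorch path (no recomputation)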
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output, ref_coverage = encdec_attn_bias_func(False, training, self.heads,
self.ref_inputs_q, self.ref_inputs_kv,
self.ref_parameters.in_proj_weight_q,
self.ref_parameters.in_proj_weight_kv,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias_q,
self.ref_parameters.in_proj_bias_kv,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
False, True)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
            print(f"\nPyTorch Enc-Dec Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
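            # time the PyTorch path with activation recomputation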
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ref_output, ref_coverage = encdec_attn_bias_func(True, training, self.heads,
self.ref_inputs_q, self.ref_inputs_kv,
self.ref_parameters.in_proj_weight_q,
self.ref_parameters.in_proj_weight_kv,
self.ref_parameters.out_proj_weight,
self.ref_parameters.in_proj_bias_q,
self.ref_parameters.in_proj_bias_kv,
self.ref_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
False, True)
grad_outputs_ref = torch.randn_like(ref_output)
ref_output.backward(grad_outputs_ref)
self.ref_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
            print(f"\nPyTorch Enc-Dec Attn Recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
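            # time the fused CUDA path (no recomputation)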
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
tst_output, tst_coverage = encdec_attn_bias_func(False, training, self.heads,
self.tst_inputs_q, self.tst_inputs_kv,
self.tst_parameters.in_proj_weight_q,
self.tst_parameters.in_proj_weight_kv,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias_q,
self.tst_parameters.in_proj_bias_kv,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
True, True)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
            print(f"\nCUDA Enc-Dec Attn time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
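            # time the fused CUDA path with activation recomputation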
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
tst_output, tst_coverage = encdec_attn_bias_func(True, training, self.heads,
self.tst_inputs_q, self.tst_inputs_kv,
self.tst_parameters.in_proj_weight_q,
self.tst_parameters.in_proj_weight_kv,
self.tst_parameters.out_proj_weight,
self.tst_parameters.in_proj_bias_q,
self.tst_parameters.in_proj_bias_kv,
self.tst_parameters.out_proj_bias,
mask, dropout,
False, None,
False, None, None,
True, True)
grad_outputs_tst = torch.randn_like(tst_output)
tst_output.backward(grad_outputs_tst)
self.tst_parameters.zero_grad()
torch.cuda.synchronize()
stop_time = time()
            print(f"\nCUDA Enc-Dec Attn Recompute time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")


if __name__ == '__main__':
unittest.main()
| 22,617 | 52.980907 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/modules/optimized/__init__.py | | 0 | 0 | 0 | py |