"""UniRecConfig model configuration""" |
|
|
|
|
|
from collections import OrderedDict |
|
|
from typing import Any, Mapping, Optional |
|
|
|
|
|
from transformers import PreTrainedTokenizer |
|
|
from transformers.configuration_utils import PretrainedConfig |
|
|
from transformers.onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast |
|
|
from transformers.onnx.utils import compute_effective_axis_dimension |
|
|
from transformers.utils import TensorType, is_torch_available, logging |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
class UniRecConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a UniRec model. It is used to instantiate the
    model according to the specified arguments, defining the model architecture. The text-decoder arguments follow
    the M2M100 configuration ([facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M)), while the
    remaining arguments (`depths`, `dims`, `mixer`, `num_heads`, `sub_k`, `mlp_ratio`, `kernel_size`,
    `drop_path_rate`) parameterize the stage-wise encoder backbone and are stored on the configuration as given.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50000):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling the model.
        d_model (`int`, *optional*, defaults to 384):
            Dimensionality of the layers and the pooler layer.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`,
            `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 3072):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        label_smoothing (`float`, *optional*, defaults to 0.1):
            The label-smoothing factor applied to the training loss.

    Example:

    ```python
    >>> # Initializing a UniRec configuration with the default values
    >>> configuration = UniRecConfig()

    >>> # Accessing the configuration
    >>> configuration.d_model
    384
    ```"""

    model_type = 'm2m_100'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'encoder_attention_heads',
        'hidden_size': 'd_model'
    }

    def __init__(
            self,
            vocab_size=50000,
            max_position_embeddings=3072,
            decoder_layers=6,
            decoder_ffn_dim=1536,
            decoder_attention_heads=6,
            encoder_layerdrop=0.0,
            decoder_layerdrop=0.0,
            use_cache=True,
            is_encoder_decoder=True,
            activation_function='relu',
            d_model=384,
            dropout=0.1,
            attention_dropout=0.1,
            activation_dropout=0.0,
            init_std=0.02,
            decoder_start_token_id=0,
            scale_embedding=True,
            pad_token_id=1,
            bos_token_id=0,
            eos_token_id=2,
            depths=[2, 2, 9, 2],
            dims=[64, 128, 256, 384],
            mixer=[['Conv'] * 2, ['Conv'] * 2,
                   ['Conv'] * 6 + ['FGlobal', 'Global', 'Global'],
                   ['Global'] * 2],
            num_heads=[2, 4, 4, 6],
            sub_k=[[2, 2], [2, 2], [2, 2], [2, 2]],
            mlp_ratio=4,
            kernel_size=[3, 3],
            drop_path_rate=0.1,
            label_smoothing=0.1,
            torch_dtype='bfloat16',
            **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.depths = depths
        self.dims = dims
        self.mixer = mixer
        self.num_heads = num_heads
        self.sub_k = sub_k
        self.mlp_ratio = mlp_ratio
        self.kernel_size = kernel_size
        self.drop_path_rate = drop_path_rate
        self.label_smoothing = label_smoothing
        self.torch_dtype = torch_dtype

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


class UniRecOnnxConfig(OnnxSeq2SeqConfigWithPast):
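    """ONNX export configuration for UniRec, reusing the encoder-decoder
    machinery of [`OnnxSeq2SeqConfigWithPast`]: it declares the dynamic input
    axes and generates dummy encoder/decoder inputs (optionally including
    `past_key_values`) for tracing.

    Example (illustrative sketch; assumes an existing `UniRecConfig` instance
    named `config`):

    ```python
    >>> onnx_config = UniRecOnnxConfig(config, task='seq2seq-lm')
    >>> list(onnx_config.inputs.keys())
    ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']
    ```
    """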

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict([
            ('input_ids', {
                0: 'batch',
                1: 'encoder_sequence'
            }),
            ('attention_mask', {
                0: 'batch',
                1: 'encoder_sequence'
            }),
        ])

        if self.use_past:
            # With cached past states the decoder consumes a single new token
            # per step, so only its batch axis is dynamic, while its attention
            # mask spans both past and current positions.
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {
                0: 'batch',
                1: 'past_decoder_sequence + sequence'
            }
        else:
            common_inputs['decoder_input_ids'] = {
                0: 'batch',
                1: 'decoder_sequence'
            }
            common_inputs['decoder_attention_mask'] = {
                0: 'batch',
                1: 'decoder_sequence'
            }

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
            self,
            tokenizer: PreTrainedTokenizer,
            batch_size: int = -1,
            seq_length: int = -1,
            is_pair: bool = False,
            framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
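        # If a dynamic batch axis (-1) was requested, fall back to a small
        # fixed batch so the exported ONNX graph is not specialized to it.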
        batch_size = compute_effective_axis_dimension(
            batch_size,
            fixed_dimension=OnnxConfig.default_fixed_batch,
            num_token_to_add=0)

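        # Likewise for the sequence axis, reserving room for the special
        # tokens the tokenizer will add.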
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length,
            fixed_dimension=OnnxConfig.default_fixed_sequence,
            num_token_to_add=token_to_add)

        # Generate dummy inputs according to the computed batch and sequence
        # lengths: each sample is `seq_length` unk tokens joined by spaces.
        dummy_input = [' '.join([tokenizer.unk_token] * seq_length)
                       ] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
            self,
            tokenizer: PreTrainedTokenizer,
            batch_size: int = -1,
            seq_length: int = -1,
            is_pair: bool = False,
            framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)

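        # Generate decoder inputs. When exporting with past_key_values the
        # decoder only receives the single new token of the current step.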
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {
            f'decoder_{name}': tensor
            for name, tensor in decoder_inputs.items()
        }
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError(
                    'Cannot generate dummy past_keys inputs without PyTorch installed.'
                )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            # Cached key/value tensors have shape
            # (batch, num_heads, sequence_length, head_dim).
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            # Extend the decoder attention mask so it also covers the past
            # positions.
            common_inputs['decoder_attention_mask'] = torch.cat([
                common_inputs['decoder_attention_mask'],
                torch.ones(batch, decoder_past_length)
            ], dim=1)

            common_inputs['past_key_values'] = []

            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers,
                                 num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

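            # If the encoder and decoder depths differ, the layers shared by
            # both sides cache self- and cross-attention states (four tensors
            # each), while the remaining layers cache self-attention only.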
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((
                    torch.zeros(decoder_shape),
                    torch.zeros(decoder_shape),
                    torch.zeros(encoder_shape),
                    torch.zeros(encoder_shape),
                ))

            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append(
                    (torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm


__all__ = ['UniRecConfig', 'UniRecOnnxConfig']