| """ deltalm model configuration""" |
|
|
| import warnings |
| from transformers.configuration_utils import PretrainedConfig |
| from transformers.utils import logging |
| logger = logging.get_logger(__name__) |
|
|
| class DeltalmConfig(PretrainedConfig): |
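    r"""
    Configuration class for a DeltaLM-style encoder-decoder model. It holds the hyper-parameters used to
    instantiate the model and inherits generic serialization and generation behaviour from
    [`PretrainedConfig`]. The `attribute_map` class attribute exposes `d_model` as `hidden_size` and
    `encoder_attention_heads` as `num_attention_heads`, so code written against the generic
    `PretrainedConfig` attribute names keeps working.

    The argument names and defaults below simply mirror the `__init__` signature in this file; released
    DeltaLM checkpoints may ship different values in their `config.json`.

    Args:
        vocab_size (`int`, *optional*, defaults to 250001):
            Size of the vocabulary; defines the number of distinct token ids the model can represent.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the embeddings and hidden states.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads per encoder layer.
        decoder_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads per decoder layer.
        encoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the encoder feed-forward layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 3072):
            Dimensionality of the decoder feed-forward layers.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            Maximum sequence length the model can handle.
        dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability for fully connected layers.
        label_smoothing (`float`, *optional*, defaults to 0.1):
            Label-smoothing factor stored on the config for use by the accompanying modeling/training code.
        encoder_normalize_before (`bool`, *optional*, defaults to `False`):
            Whether the encoder uses pre-layer-norm (`True`) instead of post-layer-norm, following the
            fairseq flag of the same name.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the decoder should return its key/value cache during generation.

    Example (illustrative only, using the defaults defined in this file):

        >>> config = DeltalmConfig(d_model=768, encoder_attention_heads=12)
        >>> config.hidden_size  # resolved through the `attribute_map` defined below
        768
    """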
    model_type = "Deltalm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=250001,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=3072,
        encoder_attention_heads=12,
        decoder_layers=6,
        decoder_ffn_dim=3072,
        decoder_attention_heads=12,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=0,
        forced_eos_token_id=2,
        label_smoothing=0.1,
        length_penalty=1.0,
        encoder_normalize_before=False,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        self.label_smoothing = label_smoothing
        self.encoder_normalize_before = encoder_normalize_before

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            length_penalty=length_penalty,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
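

# Minimal smoke-test sketch (not part of the library API). It only exercises code defined in this
# file plus the standard `PretrainedConfig` serialization helpers (`to_dict` / `from_dict`), and
# illustrates how `attribute_map` redirects `hidden_size` and `num_attention_heads` onto `d_model`
# and `encoder_attention_heads`.
if __name__ == "__main__":
    config = DeltalmConfig(d_model=768, encoder_attention_heads=12)

    # `hidden_size` and `num_attention_heads` are resolved through `attribute_map` / the properties above.
    assert config.hidden_size == 768
    assert config.num_attention_heads == 12

    # Round-trip through the standard PretrainedConfig dict serialization.
    restored = DeltalmConfig.from_dict(config.to_dict())
    assert restored.d_model == config.d_model
    print(restored.model_type, restored.hidden_size)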