from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation


class MyNewModel2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MyNewModel2Model`]. It is used to instantiate a
    MyNewModel2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma-7B.
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MyNewModel2 model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`MyNewModel2Model`]

    ```python
    >>> from transformers import MyNewModel2Model, MyNewModel2Config

    >>> # Initializing a MyNewModel2 style configuration
    >>> configuration = MyNewModel2Config()

    >>> # Initializing a model from that configuration
    >>> model = MyNewModel2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "my_new_model2"
    keys_to_ignore_at_inference = ["past_key_values"]

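    # Tensor-parallel sharding plan: "colwise" splits a projection's output features
    # across ranks, "rowwise" splits its input features, so attention and MLP blocks
    # each end with a row-parallel projection that reduces the partial results.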
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
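    # Pipeline-parallel plan: maps each stage to its (input names, output names),
    # describing how activations flow from one pipeline stage to the next.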
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

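        # For backward compatibility: fall back to multi-head attention (one KV head
        # per query head) when no key/value head count is given.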
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
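        # Validate the rotary position embedding parameters.
        # BC: if there is a legacy "type" field, copy it to "rope_type".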
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
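
# A minimal usage sketch (hypothetical import path, assuming this model is
# registered with the transformers library under `my_new_model2`):
#
#     from transformers import MyNewModel2Config
#
#     config = MyNewModel2Config(
#         num_key_value_heads=8,  # grouped-query attention: 8 KV heads shared by 32 query heads
#         rope_scaling={"type": "linear", "factor": 2.0},  # legacy "type" is copied to "rope_type"
#     )
#     assert config.rope_scaling["rope_type"] == "linear"
#     assert config.head_dim == 128  # hidden_size // num_attention_heads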