from transformers import PretrainedConfig


class AlinlightConfig(PretrainedConfig):
    """
    Configuration class for the Alinlight model.

    Args:
        vocab_size (int): Vocabulary size of the model.
        hidden_size (int): Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (int): Dimensionality of the "intermediate" (i.e., feed-forward) layer.
        num_hidden_layers (int): Number of hidden layers in the Transformer encoder.
        num_attention_heads (int): Number of attention heads for each attention layer.
        num_key_value_heads (int): Number of key/value heads for Grouped Query Attention.
        max_position_embeddings (int): The maximum sequence length that this model might ever be used with.
        rope_theta (float): The base period of the RoPE embeddings.
        rope_scaling (dict, optional): Dictionary containing the scaling configuration for the RoPE embeddings.
        sliding_window (int, optional): Sliding window size for local attention. None to disable.
        attention_dropout (float): The dropout ratio for the attention probabilities.
        use_qk_norm (bool): Whether to apply RMSNorm to the query and key states.
        attn_logit_softcapping (float, optional): If set, applies tanh soft-capping to attention logits (Gemma-2 style).
        rms_norm_eps (float): The epsilon used by the RMS normalization layers.
        initializer_range (float): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        resid_pdrop (float): The dropout probability applied to the residual branches (attention and feed-forward outputs).
        embed_pdrop (float): The dropout probability for the embedding layer.
        embed_scale (bool): Whether to scale embeddings by sqrt(hidden_size).
        final_logit_softcapping (float, optional): If set, applies tanh soft-capping to the final LM head logits.
        z_loss_weight (float): Coefficient for the Z-loss regularization term (stabilizes the final logits).
        use_cache (bool): Whether the model should return past key/value states to speed up decoding.
        pad_token_id (int): Id of the padding token.
        bos_token_id (int): Id of the beginning-of-sequence token.
        eos_token_id (int): Id of the end-of-sequence token.
        tie_word_embeddings (bool): Whether to tie the input and output embedding weights.
    """

    model_type = "alinlight"

    def __init__(
        self,
        # Model dimensions
        vocab_size=128000,
        hidden_size=2048,
        intermediate_size=5632,
        num_hidden_layers=22,
        num_attention_heads=32,
        num_key_value_heads=8,
        # Rotary position embeddings
        max_position_embeddings=4096,
        rope_theta=10000.0,
        rope_scaling=None,
        # Attention
        sliding_window=None,
        attention_dropout=0.0,
        use_qk_norm=True,
        attn_logit_softcapping=50.0,
        # Normalization, initialization, and dropout
        rms_norm_eps=1e-6,
        initializer_range=0.02,
        resid_pdrop=0.0,
        embed_pdrop=0.0,
        # Embedding scaling and output logits
        embed_scale=True,
        final_logit_softcapping=30.0,
        z_loss_weight=1e-4,
        # Generation and special tokens
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout
        self.use_qk_norm = use_qk_norm
        self.attn_logit_softcapping = attn_logit_softcapping
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range
        self.resid_pdrop = resid_pdrop
        self.embed_pdrop = embed_pdrop
        self.embed_scale = embed_scale
        self.final_logit_softcapping = final_logit_softcapping
        self.z_loss_weight = z_loss_weight
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
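

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the model code):
    # build a config, override a couple of fields, and round-trip it through
    # the dict serialization that PretrainedConfig provides.
    config = AlinlightConfig(hidden_size=1024, num_hidden_layers=12)
    print(config.model_type, config.hidden_size, config.num_key_value_heads)

    restored = AlinlightConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 1024

    import math

    # Illustration of what the soft-capping knob controls. The formula below
    # is the usual Gemma-2-style capping (an assumption here, not read from
    # this file): capped = cap * tanh(logit / cap), so logits stay in (-cap, cap).
    cap = config.final_logit_softcapping
    raw_logit = 100.0
    print(f"softcap: {raw_logit} -> {cap * math.tanh(raw_logit / cap):.2f}")

    # Illustration of the Z-loss term, assuming the common PaLM-style
    # definition z_loss = weight * log(sum(exp(logits)))**2:
    logits = [1.0, 2.0, 3.0]
    log_z = math.log(sum(math.exp(x) for x in logits))
    print(f"z_loss: {config.z_loss_weight * log_z ** 2:.6f}")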