# -*- coding: utf-8 -*-
# Copyright 2026 EngineerGL Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import PretrainedConfig


class AlinlightConfig(PretrainedConfig):
"""
    Configuration class for the Alinlight model.
Args:
vocab_size (int): Vocabulary size of the model.
        hidden_size (int): Dimensionality of the hidden representations.
intermediate_size (int): Dimensionality of the "intermediate" (i.e., feed-forward) layer.
        num_hidden_layers (int): Number of hidden layers in the Transformer decoder.
num_attention_heads (int): Number of attention heads for each attention layer.
num_key_value_heads (int): Number of key/value heads for Grouped Query Attention.
max_position_embeddings (int): The maximum sequence length that this model might ever be used with.
rope_theta (float): The base period of the RoPE embeddings.
rope_scaling (dict, optional): Dictionary containing the scaling configuration for the RoPE embeddings.
        sliding_window (int, optional): Sliding window size for local attention; set to None to disable.
attention_dropout (float): The dropout ratio for the attention probabilities.
use_qk_norm (bool): Whether to apply RMSNorm to Query and Key matrices.
attn_logit_softcapping (float, optional): If set, applies tanh soft-capping to attention logits (Gemma-2 style).
rms_norm_eps (float): The epsilon used by the rms normalization layers.
initializer_range (float): The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        resid_pdrop (float): The dropout probability applied to the attention and feed-forward (residual) outputs in each Transformer layer.
embed_pdrop (float): The dropout probability for the embedding layer.
embed_scale (bool): Whether to scale embeddings by sqrt(hidden_size).
final_logit_softcapping (float, optional): If set, applies tanh soft-capping to final LM head logits.
z_loss_weight (float): Coefficient for the Z-loss regularization term (stabilizes final logits).
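
    Example (a minimal usage sketch; it relies only on the class and the default
    values defined in this file, and the shown outputs follow from those defaults):

        >>> config = AlinlightConfig()
        >>> config.model_type
        'alinlight'
        >>> # 32 query heads sharing 8 key/value heads -> GQA groups of 4.
        >>> config.num_attention_heads // config.num_key_value_heads
        4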
"""
model_type = "alinlight"
def __init__(
self,
# Architecture
vocab_size=128000,
hidden_size=2048,
intermediate_size=5632,
num_hidden_layers=22,
num_attention_heads=32,
num_key_value_heads=8,
# Positional Encoding
max_position_embeddings=4096,
rope_theta=10000.0,
rope_scaling=None,
# Attention
sliding_window=None,
attention_dropout=0.0,
use_qk_norm=True,
attn_logit_softcapping=50.0,
# Normalization & Regularization
rms_norm_eps=1e-6,
initializer_range=0.02,
resid_pdrop=0.0,
embed_pdrop=0.0,
# Stability Features
embed_scale=True,
final_logit_softcapping=30.0,
z_loss_weight=1e-4,
# System
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=True,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.max_position_embeddings = max_position_embeddings
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.sliding_window = sliding_window
self.attention_dropout = attention_dropout
self.use_qk_norm = use_qk_norm
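        # Gemma-2 style soft-capping (as referenced in the docstring) conventionally
        # rescales logits as softcap * tanh(logits / softcap); the exact application
        # lives in the modeling code, this class only stores the thresholds.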
self.attn_logit_softcapping = attn_logit_softcapping
self.rms_norm_eps = rms_norm_eps
self.initializer_range = initializer_range
self.resid_pdrop = resid_pdrop
self.embed_pdrop = embed_pdrop
self.embed_scale = embed_scale
self.final_logit_softcapping = final_logit_softcapping
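        # Z-loss in the PaLM sense typically adds z_loss_weight * log(Z)**2 to the
        # training loss (Z being the softmax partition sum) to keep final logits from
        # drifting; only the coefficient is stored here.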
self.z_loss_weight = z_loss_weight
self.use_cache = use_cache
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs
        )
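

# ---------------------------------------------------------------------------
# Optional smoke-test sketch (illustrative only, not part of the model code).
# It instantiates the config with the defaults defined above and prints a few
# quantities those defaults imply; `to_json_string()` is the standard
# `PretrainedConfig` serialization helper from `transformers`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = AlinlightConfig()
    # 2048 hidden dims split across 32 heads -> 64-dimensional heads.
    head_dim = config.hidden_size // config.num_attention_heads
    # 32 query heads sharing 8 key/value heads -> GQA groups of 4.
    gqa_group_size = config.num_attention_heads // config.num_key_value_heads
    print(f"head_dim={head_dim}, gqa_group_size={gqa_group_size}")
    print(config.to_json_string())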