# Terminator-X / configuration_model.py
from transformers import PretrainedConfig
class HybridModelConfig(PretrainedConfig):
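    """Configuration class for the Terminator-X hybrid model.

    Extends ``PretrainedConfig`` with DeepSeek-style Multi-head Latent
    Attention (MLA) compression dimensions and Multi-Head Connections
    (MHC) stream-mixing settings.
    """
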
    model_type = "hybrid_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=151936,
        hidden_size=768,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=12,
        # MLA compression dims (DeepSeek-style naming); see the shape sketch
        # at the end of this file.
        kv_lora_rank=192,       # KV latent/compression dimension (d_c)
        q_lora_rank=384,        # Query latent/compression dimension (d_c1)
        qk_rope_head_dim=32,    # RoPE dimension per head (d_rotate)
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        sliding_window=4096,
        attention_dropout=0.0,
        # MHC (Multi-Head Connections) settings; see the Sinkhorn sketch at
        # the end of this file.
        mhc_num_streams=4,      # number of parallel streams (mhc_n)
        mhc_sinkhorn_iters=20,  # Sinkhorn-Knopp iterations (mhc_tmax)
        mhc_alpha_init=0.01,
        mhc_rmsnorm_eps=1e-6,
        mhc_stream_init="paper",
        mhc_readout_init="first",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.sliding_window = sliding_window
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.mhc_num_streams = mhc_num_streams
        self.mhc_sinkhorn_iters = mhc_sinkhorn_iters
        self.mhc_alpha_init = mhc_alpha_init
        self.mhc_rmsnorm_eps = mhc_rmsnorm_eps
        self.mhc_stream_init = mhc_stream_init
        self.mhc_readout_init = mhc_readout_init
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
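

# ---------------------------------------------------------------------------
# Illustrative MLA shape arithmetic (a hypothetical helper, not part of the
# original upload). It assumes per-head dim = hidden_size // num_attention_heads
# and that each head's query/key splits into a RoPE part of qk_rope_head_dim
# plus a content ("NoPE") remainder; the actual attention module may define
# these dims differently.
# ---------------------------------------------------------------------------
def _mla_shape_summary(cfg: "HybridModelConfig") -> dict:
    head_dim = cfg.hidden_size // cfg.num_attention_heads  # 768 // 12 = 64
    return {
        "head_dim": head_dim,
        "qk_rope_head_dim": cfg.qk_rope_head_dim,              # 32 (d_rotate)
        "qk_nope_head_dim": head_dim - cfg.qk_rope_head_dim,   # assumed split
        "kv_latent_dim": cfg.kv_lora_rank,                     # 192 (d_c)
        "q_latent_dim": cfg.q_lora_rank,                       # 384 (d_c1)
    }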
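

# ---------------------------------------------------------------------------
# What `mhc_sinkhorn_iters` controls, as a standalone sketch (hypothetical
# helper; the real MHC mixing code lives in the model file, not here).
# Sinkhorn-Knopp alternately normalizes rows and columns of a positive
# mhc_num_streams x mhc_num_streams matrix so it approaches a doubly
# stochastic mixing matrix over the parallel streams.
# ---------------------------------------------------------------------------
import torch


def sinkhorn_knopp(logits: torch.Tensor, n_iters: int = 20) -> torch.Tensor:
    """Project `logits` onto (approximately) doubly-stochastic matrices."""
    m = torch.exp(logits)
    for _ in range(n_iters):
        m = m / m.sum(dim=-1, keepdim=True)  # normalize rows
        m = m / m.sum(dim=-2, keepdim=True)  # normalize columns
    return m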
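

# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch; not part of the original upload).
# It relies only on the standard `transformers` PretrainedConfig API:
# instantiate, round-trip through save_pretrained/from_pretrained, and
# register the config under its `model_type` for AutoConfig resolution.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    from transformers import AutoConfig

    config = HybridModelConfig()
    assert config.model_type == "hybrid_model"

    # JSON round-trip via the standard PretrainedConfig serialization.
    with tempfile.TemporaryDirectory() as tmp:
        config.save_pretrained(tmp)
        reloaded = HybridModelConfig.from_pretrained(tmp)
    assert reloaded.kv_lora_rank == config.kv_lora_rank

    # Register under its model_type so AutoConfig can resolve "hybrid_model".
    AutoConfig.register("hybrid_model", HybridModelConfig)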