Trouter-Library committed on
Commit ee2ec96 · verified · 1 Parent(s): 42d6c22

Create configuration_helion.py

Files changed (1)
  1. configuration_helion.py +103 -0
configuration_helion.py ADDED
@@ -0,0 +1,103 @@
+"""Helion model configuration."""
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class HelionConfig(PretrainedConfig):
+    """
+    Configuration class for Helion model.
+
+    Args:
+        vocab_size (int, optional): Vocabulary size. Defaults to 32768.
+        hidden_size (int, optional): Dimensionality of hidden layers. Defaults to 4096.
+        intermediate_size (int, optional): Dimensionality of MLP. Defaults to 14336.
+        num_hidden_layers (int, optional): Number of decoder layers. Defaults to 32.
+        num_attention_heads (int, optional): Number of attention heads. Defaults to 32.
+        num_key_value_heads (int, optional): Number of key-value heads for GQA. Defaults to 8.
+        hidden_act (str, optional): Activation function. Defaults to "silu".
+        max_position_embeddings (int, optional): Maximum sequence length. Defaults to 8192.
+        initializer_range (float, optional): Standard deviation for weight initialization. Defaults to 0.02.
+        rms_norm_eps (float, optional): Epsilon for RMS normalization. Defaults to 1e-6.
+        use_cache (bool, optional): Whether to use KV cache. Defaults to True.
+        pad_token_id (int, optional): Padding token ID. Defaults to None.
+        bos_token_id (int, optional): Beginning of sequence token ID. Defaults to 1.
+        eos_token_id (int, optional): End of sequence token ID. Defaults to 2.
+        tie_word_embeddings (bool, optional): Tie input/output embeddings. Defaults to False.
+        rope_theta (float, optional): Base for RoPE. Defaults to 10000.0.
+        rope_scaling (dict, optional): RoPE scaling config. Defaults to None.
+        attention_bias (bool, optional): Add bias to attention projections. Defaults to False.
+        attention_dropout (float, optional): Dropout for attention. Defaults to 0.0.
+        mlp_bias (bool, optional): Add bias to MLP. Defaults to False.
+    """
+
+    model_type = "helion"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32768,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        max_position_embeddings=8192,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        mlp_bias=False,
+        residual_dropout=0.0,
+        embedding_dropout=0.0,
+        use_sliding_window=False,
+        sliding_window=None,
+        use_flash_attention_2=True,
+        pretraining_tp=1,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # Grouped Query Attention
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+        self.num_key_value_heads = num_key_value_heads
+
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+        self.mlp_bias = mlp_bias
+        self.residual_dropout = residual_dropout
+        self.embedding_dropout = embedding_dropout
+        self.use_sliding_window = use_sliding_window
+        self.sliding_window = sliding_window
+        self.use_flash_attention_2 = use_flash_attention_2
+        self.pretraining_tp = pretraining_tp
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
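
A minimal usage sketch, not part of the commit above: it assumes the committed module is importable as configuration_helion and that a recent transformers release is installed. It instantiates HelionConfig with a couple of overrides, shows the query-to-KV head grouping implied by the defaults, and round-trips the config through save_pretrained / from_pretrained, which PretrainedConfig provides out of the box; the directory name used here is arbitrary.

from transformers import AutoConfig

from configuration_helion import HelionConfig

# Build a config, overriding two of the documented defaults.
config = HelionConfig(vocab_size=32000, max_position_embeddings=4096)

# With the defaults of 32 attention heads and 8 key/value heads,
# each KV head is shared by 32 // 8 = 4 query heads (GQA).
print(config.num_attention_heads // config.num_key_value_heads)  # -> 4

# PretrainedConfig gives JSON serialization for free ("helion-checkpoint" is
# just a local directory chosen for this example).
config.save_pretrained("helion-checkpoint")
reloaded = HelionConfig.from_pretrained("helion-checkpoint")
assert reloaded.vocab_size == 32000

# Optional: map the "helion" model_type to this class so that
# AutoConfig.from_pretrained can resolve such checkpoints directly.
AutoConfig.register("helion", HelionConfig)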