talphaidze committed
Commit 45f2977 · verified · 1 Parent(s): 9e0af72

Upload configuration.py

Browse files
Files changed (1)
  1. configuration.py +51 -0
configuration.py ADDED
@@ -0,0 +1,51 @@
+ from transformers import PretrainedConfig
+ 
+ class MoEGPTConfig(PretrainedConfig):
+     model_type = "moegpt"
+ 
+     def __init__(
+         self,
+         vocab_size=50304,
+         n_embd=1152,  # 768
+         n_layer=24,  # 12
+         n_head=16,  # 12
+         sequence_length=1024,
+         moe=False,
+         moe_routing="standard_gating",
+         moe_num_experts=4,
+         moe_num_experts_per_tok=2,
+         moe_softmax_order="softmax_topk",
+         moe_router_loss="load_balancing_z_loss",
+         moe_aux_loss_factor=0.01,
+         moe_z_loss_factor=1.0,
+         mlp_dim_exp_factor=1.0,
+         dropout=0.0,
+         bias=False,
+         architectures=["MoEGPTForCausalLM"],
+         auto_map={
+             "AutoConfig": "configuration.MoEGPTConfig",
+             "AutoModelForCausalLM": "modeling.MoEGPTForCausalLM",
+             "AutoTokenizer": "GPT2TokenizerFast",
+         },
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.sequence_length = sequence_length
+         self.moe = moe
+         self.moe_routing = moe_routing
+         self.moe_num_experts = moe_num_experts
+         self.moe_num_experts_per_tok = moe_num_experts_per_tok
+         self.moe_softmax_order = moe_softmax_order
+         self.moe_router_loss = moe_router_loss
+         self.moe_aux_loss_factor = moe_aux_loss_factor
+         self.moe_z_loss_factor = moe_z_loss_factor
+         self.mlp_dim_exp_factor = mlp_dim_exp_factor
+         self.dropout = dropout
+         self.bias = bias
+         self.architectures = architectures
+         self.auto_map = auto_map
+ 
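For reference, a minimal usage sketch of this configuration (not part of the commit): it assumes the companion modeling.py defining MoEGPTForCausalLM is uploaded to the same model repo, and the repo id used below is a placeholder, not confirmed by this commit. The auto_map entries are what let AutoConfig and AutoModelForCausalLM resolve these custom classes when loading with trust_remote_code=True.

# Usage sketch (assumptions: configuration.py and modeling.py live in the same
# model repo; "talphaidze/moegpt" is a placeholder repo id).
from transformers import AutoConfig, AutoModelForCausalLM
from configuration import MoEGPTConfig

# Instantiate locally, overriding a couple of MoE defaults, and save to disk.
config = MoEGPTConfig(moe=True, moe_num_experts=8)
config.save_pretrained("./moegpt-checkpoint")

# When loading from the Hub, auto_map points the Auto* classes at the custom
# code, which requires opting in with trust_remote_code=True.
config = AutoConfig.from_pretrained("talphaidze/moegpt", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("talphaidze/moegpt", trust_remote_code=True)

Registering the classes through auto_map keeps the model loadable with the standard Auto* API, without users having to install a separate package for the custom architecture.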