ehartford committed
Commit 70242c4 · verified · 1 Parent(s): fb9dd51

Delete configuration_deepseek.py

Files changed (1)
  1. configuration_deepseek.py +0 -199
configuration_deepseek.py DELETED
@@ -1,199 +0,0 @@
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
- class DeepseekV3Config(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
-     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-     defaults will yield a configuration similar to that of DeepSeek-V3.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 129280):
-             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
-             `input_ids` passed when calling [`DeepseekV3Model`].
-         hidden_size (`int`, *optional*, defaults to 7168):
-             Dimension of the hidden representations.
-         intermediate_size (`int`, *optional*, defaults to 18432):
-             Dimension of the MLP representations.
-         moe_intermediate_size (`int`, *optional*, defaults to 2048):
-             Dimension of the MoE representations.
-         num_hidden_layers (`int`, *optional*, defaults to 61):
-             Number of hidden layers in the Transformer decoder.
-         num_nextn_predict_layers (`int`, *optional*, defaults to 1):
-             Number of next-n prediction layers in the DeepseekV3 model.
-         num_attention_heads (`int`, *optional*, defaults to 128):
-             Number of attention heads for each attention layer in the Transformer decoder.
-         n_shared_experts (`int`, *optional*, defaults to 1):
-             Number of shared experts; `None` means a dense model.
-         n_routed_experts (`int`, *optional*, defaults to 256):
-             Number of routed experts; `None` means a dense model.
-         routed_scaling_factor (`float`, *optional*, defaults to 2.5):
-             Scaling factor for routed experts.
-         topk_method (`str`, *optional*, defaults to `'noaux_tc'`):
-             Top-k method used in the routed gate.
-         n_group (`int`, *optional*, defaults to 8):
-             Number of groups for routed experts.
-         topk_group (`int`, *optional*, defaults to 4):
-             Number of selected groups for each token (ensuring that the selected experts are only within `topk_group` groups).
-         num_experts_per_tok (`int`, *optional*, defaults to 8):
-             Number of selected experts; `None` means a dense model.
-         moe_layer_freq (`int`, *optional*, defaults to 1):
-             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
-         first_k_dense_replace (`int`, *optional*, defaults to 3):
-             Number of dense layers in the shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
-                                                                  \--k dense layers--/
-         norm_topk_prob (`bool`, *optional*, defaults to `True`):
-             Whether to normalize the weights of the routed experts.
-         scoring_func (`str`, *optional*, defaults to `'sigmoid'`):
-             Method of computing expert weights.
-         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
-             Auxiliary loss weight coefficient.
-         seq_aux (`bool`, *optional*, defaults to `True`):
-             Whether to compute the auxiliary loss for each individual sample.
-         num_key_value_heads (`int`, *optional*):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
-             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
-             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
-             by meanpooling all the original heads within that group. For more details, check out [this
-             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
-             `num_attention_heads`.
-         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-             The non-linear activation function (function or string) in the decoder.
-         max_position_embeddings (`int`, *optional*, defaults to 4096):
-             The maximum sequence length that this model might ever be used with.
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         pad_token_id (`int`, *optional*):
-             Padding token id.
-         bos_token_id (`int`, *optional*, defaults to 0):
-             Beginning of stream token id.
-         eos_token_id (`int`, *optional*, defaults to 1):
-             End of stream token id.
-         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-             Whether to tie the input and output word embeddings.
-         rope_theta (`float`, *optional*, defaults to 10000.0):
-             The base period of the RoPE embeddings.
-         rope_scaling (`Dict`, *optional*):
-             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
-             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
-             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
-             `max_position_embeddings` to the expected new maximum.
-         attention_bias (`bool`, *optional*, defaults to `False`):
-             Whether to use a bias in the query, key, value and output projection layers during self-attention.
-         attention_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the attention probabilities.
-
-     ```python
-     >>> from transformers import DeepseekV3Model, DeepseekV3Config
-
-     >>> # Initializing a Deepseek-V3 style configuration
-     >>> configuration = DeepseekV3Config()
-
-     >>> # Initializing a model from the Deepseek-V3 style configuration
-     >>> model = DeepseekV3Model(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-
-     model_type = "deepseek_v3"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         vocab_size=129280,
-         hidden_size=7168,
-         intermediate_size=18432,
-         moe_intermediate_size=2048,
-         num_hidden_layers=61,
-         num_nextn_predict_layers=1,
-         num_attention_heads=128,
-         num_key_value_heads=128,
-         n_shared_experts=1,
-         n_routed_experts=256,
-         ep_size=1,
-         routed_scaling_factor=2.5,
-         kv_lora_rank=512,
-         q_lora_rank=1536,
-         qk_rope_head_dim=64,
-         v_head_dim=128,
-         qk_nope_head_dim=128,
-         topk_method='noaux_tc',
-         n_group=8,
-         topk_group=4,
-         num_experts_per_tok=8,
-         moe_layer_freq=1,
-         first_k_dense_replace=3,
-         norm_topk_prob=True,
-         scoring_func='sigmoid',
-         hidden_act="silu",
-         max_position_embeddings=4096,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         use_cache=True,
-         pad_token_id=None,
-         bos_token_id=0,
-         eos_token_id=1,
-         tie_word_embeddings=False,
-         rope_theta=10000.0,
-         rope_scaling=None,
-         attention_bias=False,
-         attention_dropout=0.0,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.moe_intermediate_size = moe_intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_nextn_predict_layers = num_nextn_predict_layers
-         self.num_attention_heads = num_attention_heads
-         self.n_shared_experts = n_shared_experts
-         self.n_routed_experts = n_routed_experts
-         self.ep_size = ep_size
-         self.routed_scaling_factor = routed_scaling_factor
-         self.kv_lora_rank = kv_lora_rank
-         self.q_lora_rank = q_lora_rank
-         self.qk_rope_head_dim = qk_rope_head_dim
-         self.v_head_dim = v_head_dim
-         self.qk_nope_head_dim = qk_nope_head_dim
-         self.topk_method = topk_method
-         self.n_group = n_group
-         self.topk_group = topk_group
-         self.num_experts_per_tok = num_experts_per_tok
-         self.moe_layer_freq = moe_layer_freq
-         self.first_k_dense_replace = first_k_dense_replace
-         self.norm_topk_prob = norm_topk_prob
-         self.scoring_func = scoring_func
-         # for backward compatibility
-         if num_key_value_heads is None:
-             num_key_value_heads = num_attention_heads
-
-         self.num_key_value_heads = num_key_value_heads
-         self.hidden_act = hidden_act
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.use_cache = use_cache
-         self.rope_theta = rope_theta
-         self.rope_scaling = rope_scaling
-         self.attention_bias = attention_bias
-         self.attention_dropout = attention_dropout
-
-         super().__init__(
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs,
-         )
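
For context, here is a minimal sketch of how the deleted configuration class would typically be used, assuming a copy of `configuration_deepseek.py` is still available locally (for example, from a checkout prior to this commit); the override values are arbitrary and only for illustration.

```python
# A minimal sketch, assuming configuration_deepseek.py sits next to this script.
from configuration_deepseek import DeepseekV3Config

# Instantiate with the defaults, overriding a couple of MoE-related fields.
config = DeepseekV3Config(n_routed_experts=64, num_experts_per_tok=4)

print(config.model_type)           # "deepseek_v3"
print(config.n_routed_experts)     # 64 (overridden)
print(config.hidden_size)          # 7168 (class default)
```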