iamPi committed
Commit fc1bd30 · verified · 1 Parent(s): 8ee70c8

Delete configuration_deepseek.py

Files changed (1)
  1. configuration_deepseek.py +0 -212
configuration_deepseek.py DELETED
@@ -1,212 +0,0 @@
- # Copy from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py
-
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
- class DeepseekV3Config(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
-     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-     defaults will yield a similar configuration to that of DeepSeek-V3.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 129280):
-             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
-             `input_ids` passed when calling [`DeepseekV3Model`].
-         hidden_size (`int`, *optional*, defaults to 7168):
-             Dimension of the hidden representations.
-         intermediate_size (`int`, *optional*, defaults to 18432):
-             Dimension of the MLP representations.
-         moe_intermediate_size (`int`, *optional*, defaults to 2048):
-             Dimension of the MoE representations.
-         num_hidden_layers (`int`, *optional*, defaults to 61):
-             Number of hidden layers in the Transformer decoder.
-         num_nextn_predict_layers (`int`, *optional*, defaults to 1):
-             Number of next-n prediction layers in the DeepSeekV3 model.
-         num_attention_heads (`int`, *optional*, defaults to 128):
-             Number of attention heads for each attention layer in the Transformer decoder.
-         n_shared_experts (`int`, *optional*, defaults to 1):
-             Number of shared experts; `None` means a dense model.
-         n_routed_experts (`int`, *optional*, defaults to 256):
-             Number of routed experts; `None` means a dense model.
-         routed_scaling_factor (`float`, *optional*, defaults to 2.5):
-             Scaling factor for routed experts.
-         topk_method (`str`, *optional*, defaults to `'noaux_tc'`):
-             Top-k method used in the routing gate.
-         n_group (`int`, *optional*, defaults to 8):
-             Number of groups for routed experts.
-         topk_group (`int`, *optional*, defaults to 4):
-             Number of selected groups for each token (the experts selected for a token are restricted to these `topk_group` groups).
-         num_experts_per_tok (`int`, *optional*, defaults to 8):
-             Number of selected experts; `None` means a dense model.
-         moe_layer_freq (`int`, *optional*, defaults to 1):
-             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
-         first_k_dense_replace (`int`, *optional*, defaults to 3):
-             Number of dense layers at the bottom of the network; the first `first_k_dense_replace` decoder layers use a
-             standard dense MLP and the remaining layers use MoE (embed -> k dense layers -> MoE layers -> lm_head).
-         norm_topk_prob (`bool`, *optional*, defaults to `True`):
-             Whether to normalize the weights of the routed experts.
-         scoring_func (`str`, *optional*, defaults to `'sigmoid'`):
-             Method of computing expert weights.
-         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
-             Auxiliary loss weight coefficient.
-         seq_aux (`bool`, *optional*, defaults to `True`):
-             Whether to compute the auxiliary loss for each individual sample.
-         num_key_value_heads (`int`, *optional*, defaults to 128):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
-             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
-             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
-             by mean-pooling all the original heads within that group. For more details check out [this
-             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
-             `num_attention_heads`.
-         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-             The non-linear activation function (function or string) in the decoder.
-         max_position_embeddings (`int`, *optional*, defaults to 4096):
-             The maximum sequence length that this model might ever be used with.
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         pad_token_id (`int`, *optional*):
-             Padding token id.
-         bos_token_id (`int`, *optional*, defaults to 0):
-             Beginning of stream token id.
-         eos_token_id (`int`, *optional*, defaults to 1):
-             End of stream token id.
-         pretraining_tp (`int`, *optional*, defaults to 1):
-             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
-             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
-             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
-             issue](https://github.com/pytorch/pytorch/issues/76232).
-         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-             Whether to tie the input and output word embeddings.
-         rope_theta (`float`, *optional*, defaults to 10000.0):
-             The base period of the RoPE embeddings.
-         rope_scaling (`Dict`, *optional*):
-             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
-             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
-             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
-             `max_position_embeddings` to the expected new maximum.
-         attention_bias (`bool`, *optional*, defaults to `False`):
-             Whether to use a bias in the query, key, value and output projection layers during self-attention.
-         attention_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the attention probabilities.
-
-     ```python
-     >>> from transformers import DeepseekV3Model, DeepseekV3Config
-
-     >>> # Initializing a Deepseek-V3 style configuration
-     >>> configuration = DeepseekV3Config()
-
-     >>> # Initializing a model from that configuration
-     >>> model = DeepseekV3Model(configuration)
-     ```"""
-
-     model_type = "deepseek_v3"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         vocab_size=129280,
-         hidden_size=7168,
-         intermediate_size=18432,
-         moe_intermediate_size=2048,
-         num_hidden_layers=61,
-         num_nextn_predict_layers=1,
-         num_attention_heads=128,
-         num_key_value_heads=128,
-         n_shared_experts=1,
-         n_routed_experts=256,
-         ep_size=1,
-         routed_scaling_factor=2.5,
-         kv_lora_rank=512,
-         q_lora_rank=1536,
-         qk_rope_head_dim=64,
-         v_head_dim=128,
-         qk_nope_head_dim=128,
-         topk_method='noaux_tc',
-         n_group=8,
-         topk_group=4,
-         num_experts_per_tok=8,
-         moe_layer_freq=1,
-         first_k_dense_replace=3,
-         norm_topk_prob=True,
-         scoring_func='sigmoid',
-         aux_loss_alpha=0.001,
-         seq_aux=True,
-         hidden_act="silu",
-         max_position_embeddings=4096,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         use_cache=True,
-         pad_token_id=None,
-         bos_token_id=0,
-         eos_token_id=1,
-         pretraining_tp=1,
-         tie_word_embeddings=False,
-         rope_theta=10000.0,
-         rope_scaling=None,
-         attention_bias=False,
-         attention_dropout=0.0,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.moe_intermediate_size = moe_intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_nextn_predict_layers = num_nextn_predict_layers
-         self.num_attention_heads = num_attention_heads
-         self.n_shared_experts = n_shared_experts
-         self.n_routed_experts = n_routed_experts
-         self.ep_size = ep_size
-         self.routed_scaling_factor = routed_scaling_factor
-         self.kv_lora_rank = kv_lora_rank
-         self.q_lora_rank = q_lora_rank
-         self.qk_rope_head_dim = qk_rope_head_dim
-         self.v_head_dim = v_head_dim
-         self.qk_nope_head_dim = qk_nope_head_dim
-         self.topk_method = topk_method
-         self.n_group = n_group
-         self.topk_group = topk_group
-         self.num_experts_per_tok = num_experts_per_tok
-         self.moe_layer_freq = moe_layer_freq
-         self.first_k_dense_replace = first_k_dense_replace
-         self.norm_topk_prob = norm_topk_prob
-         self.scoring_func = scoring_func
-         self.aux_loss_alpha = aux_loss_alpha
-         self.seq_aux = seq_aux
-         # for backward compatibility
-         if num_key_value_heads is None:
-             num_key_value_heads = num_attention_heads
-
-         self.num_key_value_heads = num_key_value_heads
-         self.hidden_act = hidden_act
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.pretraining_tp = pretraining_tp
-         self.use_cache = use_cache
-         self.rope_theta = rope_theta
-         self.rope_scaling = rope_scaling
-         self.attention_bias = attention_bias
-         self.attention_dropout = attention_dropout
-
-         super().__init__(
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs,
-         )
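
After this commit the repo no longer ships `configuration_deepseek.py`, so the class has to come from the upstream DeepSeek-V3 repository linked in the deleted file's header comment. A minimal sketch of using a local copy of that file (the flat import path is an assumption about where you saved it):

```python
# Minimal sketch, assuming the upstream configuration_deepseek.py
# (linked in the deleted file's header comment) was saved next to this script.
from configuration_deepseek import DeepseekV3Config

# Defaults mirror the deleted file: 61 layers, 256 routed experts, etc.
config = DeepseekV3Config()
print(config.model_type)         # "deepseek_v3"
print(config.num_hidden_layers)  # 61

# A few fields overridden, e.g. for a small debugging variant.
small = DeepseekV3Config(num_hidden_layers=4, n_routed_experts=16, num_experts_per_tok=2)
print(small.n_routed_experts)    # 16
```

While the file was still present, loading the configuration through `AutoConfig.from_pretrained(<repo_id>, trust_remote_code=True)` would have picked it up automatically, provided the repo's config.json mapped to it.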