# coding=utf-8
# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """ PanguProMoE model configuration""" | |
| from transformers.configuration_utils import PretrainedConfig | |
| from transformers.utils import logging | |
| logger = logging.get_logger(__name__) | |
| class PanguProMoEConfig(PretrainedConfig): | |
| model_type = "PanguProMoE" | |
| _auto_class = "AutoConfig" | |
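    # Setting `_auto_class` here is equivalent to calling
    # `PanguProMoEConfig.register_for_auto_class("AutoConfig")`: on
    # `save_pretrained` the config is written with an `auto_map` entry so it
    # can be reloaded via `AutoConfig.from_pretrained(..., trust_remote_code=True)`.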

    def __init__(
        self,
        vocab_size=153376,
        hidden_size=4608,
        intermediate_size=10240,
        num_hidden_layers=50,
        num_attention_heads=64,
        num_key_value_heads=4,
        mlp_only_layers=None,  # effective default [0, 1, 2, 3]; None avoids a mutable default argument
        hidden_act="silu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=100000,
        moe_intermediate_size=1280,
        shared_expert_intermediate_size=2560,
        num_experts_per_tok=8,
        num_experts=80,
        norm_topk_prob=True,
        router_enable_expert_bias=True,
        output_router_logits=False,
        routed_scaling_factor=2.5,
        qk_nope_dim=128,
        qk_rope_dim=64,
        v_channels=128,
        sandwich_norm=True,
        param_sink_number=128,
        param_sink_with_value=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
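        # With the defaults above, the 64 query heads share 4 key/value heads
        # (grouped-query attention), i.e. 16 query heads per KV head.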
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.mlp_only_layers = [0, 1, 2, 3] if mlp_only_layers is None else mlp_only_layers
        self.intermediate_size = intermediate_size
        # MoE arguments
        self.moe_intermediate_size = moe_intermediate_size
        self.shared_expert_intermediate_size = shared_expert_intermediate_size
        self.num_experts_per_tok = num_experts_per_tok
        self.num_experts = num_experts
        self.norm_topk_prob = norm_topk_prob
        self.output_router_logits = output_router_logits
        self.router_enable_expert_bias = router_enable_expert_bias
        self.routed_scaling_factor = routed_scaling_factor
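        # Assumption (the modeling code is not part of this file): as in other
        # MoE configs that expose `routed_scaling_factor`, this presumably
        # scales the combined routed-expert output before it is merged with
        # the shared-expert path.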
        self.qk_nope_dim = qk_nope_dim
        self.qk_rope_dim = qk_rope_dim
        self.v_channels = v_channels
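        # Assumption based on the field names: each query/key head is split
        # into a non-rotary part (`qk_nope_dim`, 128) and a rotary part
        # (`qk_rope_dim`, 64), giving a per-head QK dimension of 192, while
        # value heads use `v_channels` (128) channels.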
        self.sandwich_norm = sandwich_norm
        self.param_sink_number = param_sink_number
        self.param_sink_with_value = param_sink_with_value
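        # Assumption: `sandwich_norm` suggests normalization both before and
        # after each sublayer, and the `param_sink_*` fields suggest learnable
        # attention-sink parameters, with `param_sink_with_value` controlling
        # whether the sinks also carry value vectors.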
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
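

# A minimal usage sketch (not part of the upstream file): instantiate the
# config with its defaults, inspect a few derived quantities, and round-trip
# it through `save_pretrained`/`from_pretrained`. The
# "./pangu_pro_moe_config" directory name is arbitrary.
if __name__ == "__main__":
    config = PanguProMoEConfig()
    # 4 of the 50 layers (mlp_only_layers) are dense; the remaining 46 route
    # each token to 8 of 80 experts plus a shared expert.
    moe_layers = config.num_hidden_layers - len(config.mlp_only_layers)
    print(f"MoE layers: {moe_layers}, experts per token: {config.num_experts_per_tok}")

    config.save_pretrained("./pangu_pro_moe_config")
    reloaded = PanguProMoEConfig.from_pretrained("./pangu_pro_moe_config")
    assert reloaded.num_experts == config.num_experts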