intervitens committed (verified)
Commit c7c228f · 1 Parent(s): 5f0cbae

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,86 @@
+ [gMASK]<sop>
+ {%- if tools -%}
+ <|system|>
+ # Tools
+
+ You may call one or more functions to assist with the user query.
+
+ You are provided with function signatures within <tools></tools> XML tags:
+ <tools>
+ {% for tool in tools %}
+ {{ tool | tojson(ensure_ascii=False) }}
+ {% endfor %}
+ </tools>
+
+ For each function call, output the function name and arguments within the following XML format:
+ <tool_call>{function-name}<arg_key>{arg-key-1}</arg_key><arg_value>{arg-value-1}</arg_value><arg_key>{arg-key-2}</arg_key><arg_value>{arg-value-2}</arg_value>...</tool_call>{%- endif -%}
+ {%- macro visible_text(content) -%}
+ {%- if content is string -%}
+ {{- content }}
+ {%- elif content is iterable and content is not mapping -%}
+ {%- for item in content -%}
+ {%- if item is mapping and item.type == 'text' -%}
+ {{- item.text }}
+ {%- elif item is string -%}
+ {{- item }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- content }}
+ {%- endif -%}
+ {%- endmacro -%}
+ {%- set ns = namespace(last_user_index=-1) %}
+ {%- for m in messages %}
+ {%- if m.role == 'user' %}
+ {% set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+ {%- endfor %}
+ {% for m in messages %}
+ {%- if m.role == 'user' -%}<|user|>{{ visible_text(m.content) }}
+ {%- elif m.role == 'assistant' -%}
+ <|assistant|>
+ {%- set reasoning_content = '' %}
+ {%- set content = visible_text(m.content) %}
+ {%- if m.reasoning_content is string %}
+ {%- set reasoning_content = m.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if ((clear_thinking is defined and not clear_thinking) or loop.index0 > ns.last_user_index) and reasoning_content -%}
+ {{ '<think>' + reasoning_content.strip() + '</think>'}}
+ {%- else -%}
+ {{ '</think>' }}
+ {%- endif -%}
+ {%- if content.strip() -%}
+ {{ content.strip() }}
+ {%- endif -%}
+ {% if m.tool_calls %}
+ {% for tc in m.tool_calls %}
+ {%- if tc.function %}
+ {%- set tc = tc.function %}
+ {%- endif %}
+ {{- '<tool_call>' + tc.name -}}
+ {% set _args = tc.arguments %}{% for k, v in _args.items() %}<arg_key>{{ k }}</arg_key><arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>{% endfor %}</tool_call>{% endfor %}
+ {% endif %}
+ {%- elif m.role == 'tool' -%}
+ {%- if m.content is string -%}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|observation|>' }}
+ {%- endif %}
+ {{- '<tool_response>' }}
+ {{- m.content }}
+ {{- '</tool_response>' }}
+ {%- else -%}
+ <|observation|>{% for tr in m.content %}
+ <tool_response>{{ tr.output if tr.output is defined else tr }}</tool_response>{% endfor -%}
+ {% endif -%}
+ {%- elif m.role == 'system' -%}
+ <|system|>{{ visible_text(m.content) }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ <|assistant|>{{- '</think>' if (enable_thinking is defined and not enable_thinking) else '<think>' -}}
+ {%- endif -%}
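
To see what this template produces, it can be rendered through `tokenizer.apply_chat_template`; a minimal sketch, assuming a local checkout of this repo and a purely illustrative `get_weather` tool schema:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./")  # placeholder path to this repo

# Hypothetical tool, only to exercise the <tools> branch of the template.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]

messages = [{"role": "user", "content": "What's the weather in Paris?"}]

# tokenize=False returns the rendered string, so the [gMASK]<sop>, <|user|>,
# <|assistant|> and <think> markers can be inspected directly.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)
```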
config.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "architectures": [
+     "Glm4MoeLiteSCMForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map":
+   {
+     "AutoConfig": "configuration_glm4_moe_lite_scm.Glm4MoeLiteSCMConfig",
+     "AutoModel": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMModel",
+     "AutoModelForCausalLM": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMForCausalLM"
+   },
+   "bos_token_id": 0,
+   "dtype": "bfloat16",
+   "eos_token_id": [
+     154820,
+     154827,
+     154829
+   ],
+   "first_k_dense_replace": 1,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 10240,
+   "kv_lora_rank": 512,
+   "max_position_embeddings": 202752,
+   "mlp_layer_types": [
+     "dense",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse"
+   ],
+   "model_type": "glm4_moe_lite_scm",
+   "moe_intermediate_size": 1536,
+   "n_group": 1,
+   "n_routed_experts": 64,
+   "n_shared_experts": 1,
+   "norm_topk_prob": true,
+   "num_attention_heads": 20,
+   "num_experts_per_tok": 4,
+   "num_hidden_layers": 47,
+   "num_key_value_heads": 20,
+   "num_nextn_predict_layers": 0,
+   "pad_token_id": 154820,
+   "partial_rotary_factor": 1.0,
+   "pretraining_tp": 1,
+   "q_lora_rank": 768,
+   "qk_head_dim": 256,
+   "qk_nope_head_dim": 192,
+   "qk_rope_head_dim": 64,
+   "rms_norm_eps": 1e-05,
+   "rope_interleave": true,
+   "rope_parameters": {
+     "partial_rotary_factor": 1.0,
+     "rope_theta": 1000000,
+     "rope_type": "default"
+   },
+   "routed_scaling_factor": 1.8,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "topk_method": "noaux_tc",
+   "transformers_version": "5.0.0",
+   "use_cache": true,
+   "v_head_dim": 256,
+   "vocab_size": 154880
+ }
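
Because `auto_map` points at the `configuration_glm4_moe_lite_scm.py` / `modeling_glm4_moe_lite_scm.py` files added in this commit, the checkpoint loads through the custom-code path; a minimal sketch, with the repo path as a placeholder:

```python
from transformers import AutoConfig, AutoModelForCausalLM

repo = "./"  # placeholder: local checkout or hub id of this repo

# trust_remote_code=True lets AutoConfig/AutoModelForCausalLM follow auto_map
# to the Glm4MoeLiteSCMConfig / Glm4MoeLiteSCMForCausalLM classes in this repo.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
print(type(config).__name__, type(model).__name__)
```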
configuration_glm4_moe_lite_for_backconvert.py ADDED
@@ -0,0 +1,229 @@
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from transformers.configuration_utils import PreTrainedConfig, layer_type_validation
+ from transformers.modeling_rope_utils import RopeParameters
+
+
+ class Glm4MoeLiteConfig(PreTrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Glm4MoeLiteModel`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-V3,
+     e.g. [bzantium/tiny-deepseek-v3](https://huggingface.co/bzantium/tiny-deepseek-v3).
+     Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PreTrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 154880):
+             Vocabulary size of the model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Glm4MoeLiteModel`].
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 10240):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1536):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 47):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 20):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 20):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
+             `num_attention_heads`.
+         n_shared_experts (`int`, *optional*, defaults to 1):
+             Number of shared experts.
+         n_routed_experts (`int`, *optional*, defaults to 64):
+             Number of routed experts.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.8):
+             Scaling factor for routed experts.
+         kv_lora_rank (`int`, *optional*, defaults to 512):
+             Rank of the LoRA matrices for key and value projections.
+         q_lora_rank (`int`, *optional*, defaults to 768):
+             Rank of the LoRA matrices for query projections.
+         qk_rope_head_dim (`int`, *optional*, defaults to 64):
+             Dimension of the query/key heads that use rotary position embeddings.
+         v_head_dim (`int`, *optional*, defaults to 256):
+             Dimension of the value heads.
+         qk_nope_head_dim (`int`, *optional*, defaults to 192):
+             Dimension of the query/key heads that don't use rotary position embeddings.
+         n_group (`int`, *optional*, defaults to 1):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to 1):
+             Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
+         num_experts_per_tok (`int`, *optional*, defaults to 4):
+             Number of selected experts; `None` means a dense model.
+         norm_topk_prob (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the weights of the routed experts.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 202752):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 1):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_parameters (`RopeParameters`, *optional*):
+             Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+             a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+             with longer `max_position_embeddings`.
+         rope_interleave (`bool`, *optional*, defaults to `True`):
+             Whether to interleave the rotary position embeddings.
+         mlp_layer_types (`list`, *optional*):
+             MLP (MoE vs. dense) pattern for each layer.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import Glm4MoeLiteModel, Glm4MoeLiteConfig
+
+     >>> # Initializing a DeepSeek-V3 style configuration
+     >>> configuration = Glm4MoeLiteConfig()
+
+     >>> # Initializing a model from the configuration
+     >>> model = Glm4MoeLiteModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "glm4_moe_lite"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     base_model_tp_plan = {
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+         "layers.*.mlp.experts.down_proj": "local_rowwise",
+         "layers.*.mlp.experts": "gather",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+     attribute_map = {
+         "num_local_experts": "n_routed_experts",
+     }
+
+     def __init__(
+         self,
+         vocab_size: int | None = 154880,
+         hidden_size: int | None = 2048,
+         intermediate_size: int | None = 10240,
+         moe_intermediate_size: int | None = 1536,
+         num_hidden_layers: int | None = 47,
+         num_attention_heads: int | None = 20,
+         num_key_value_heads: int | None = 20,
+         n_shared_experts: int | None = 1,
+         n_routed_experts: int | None = 64,
+         routed_scaling_factor: float | None = 1.8,
+         kv_lora_rank: int | None = 512,
+         q_lora_rank: int | None = 768,
+         qk_rope_head_dim: int | None = 64,
+         v_head_dim: int | None = 256,
+         qk_nope_head_dim: int | None = 192,
+         n_group: int | None = 1,
+         topk_group: int | None = 1,
+         num_experts_per_tok: int | None = 4,
+         norm_topk_prob: bool | None = True,
+         hidden_act: str | None = "silu",
+         max_position_embeddings: int | None = 202752,
+         initializer_range: float | None = 0.02,
+         rms_norm_eps: float | None = 1e-5,
+         use_cache: bool | None = True,
+         pad_token_id: int | None = None,
+         bos_token_id: int | None = 0,
+         eos_token_id: int | None = 1,
+         pretraining_tp: int | None = 1,
+         tie_word_embeddings: bool | None = False,
+         rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
+         rope_interleave: bool | None = True,
+         mlp_layer_types=None,
+         attention_bias: bool | None = False,
+         attention_dropout: float | None = 0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+
+         # Default to MoE from the second layer on
+         self.mlp_layer_types = mlp_layer_types
+         if self.mlp_layer_types is None:
+             self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
+         layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
+
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.routed_scaling_factor = routed_scaling_factor
+         self.kv_lora_rank = kv_lora_rank
+         self.q_lora_rank = q_lora_rank
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.v_head_dim = v_head_dim
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
+         self.head_dim = qk_rope_head_dim
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.num_experts_per_tok = num_experts_per_tok
+         self.norm_topk_prob = norm_topk_prob
+         self.rope_interleave = rope_interleave
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.rope_parameters = rope_parameters
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["Glm4MoeLiteConfig"]
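
A small sanity check on these defaults (a sketch; it assumes a transformers build new enough to provide the `PreTrainedConfig` / `layer_type_validation` / `RopeParameters` imports this file relies on — the commit records transformers 5.0.0):

```python
from configuration_glm4_moe_lite_for_backconvert import Glm4MoeLiteConfig

cfg = Glm4MoeLiteConfig()

# Layer 0 is dense; the remaining 46 layers are sparse MoE layers.
assert cfg.mlp_layer_types == ["dense"] + ["sparse"] * 46

# MLA query/key heads combine a no-RoPE part (192) with a RoPE part (64).
assert cfg.qk_head_dim == cfg.qk_nope_head_dim + cfg.qk_rope_head_dim == 256
print(cfg.qk_head_dim, cfg.v_head_dim)  # 256 256
```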
configuration_glm4_moe_lite_scm.py ADDED
@@ -0,0 +1,229 @@
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from transformers.configuration_utils import PreTrainedConfig, layer_type_validation
+ from transformers.modeling_rope_utils import RopeParameters
+
+
+ class Glm4MoeLiteSCMConfig(PreTrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Glm4MoeLiteSCMModel`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-V3,
+     e.g. [bzantium/tiny-deepseek-v3](https://huggingface.co/bzantium/tiny-deepseek-v3).
+     Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PreTrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 154880):
+             Vocabulary size of the model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Glm4MoeLiteSCMModel`].
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 10240):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1536):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 47):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 20):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 20):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
+             `num_attention_heads`.
+         n_shared_experts (`int`, *optional*, defaults to 1):
+             Number of shared experts.
+         n_routed_experts (`int`, *optional*, defaults to 64):
+             Number of routed experts.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.8):
+             Scaling factor for routed experts.
+         kv_lora_rank (`int`, *optional*, defaults to 512):
+             Rank of the LoRA matrices for key and value projections.
+         q_lora_rank (`int`, *optional*, defaults to 768):
+             Rank of the LoRA matrices for query projections.
+         qk_rope_head_dim (`int`, *optional*, defaults to 64):
+             Dimension of the query/key heads that use rotary position embeddings.
+         v_head_dim (`int`, *optional*, defaults to 256):
+             Dimension of the value heads.
+         qk_nope_head_dim (`int`, *optional*, defaults to 192):
+             Dimension of the query/key heads that don't use rotary position embeddings.
+         n_group (`int`, *optional*, defaults to 1):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to 1):
+             Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
+         num_experts_per_tok (`int`, *optional*, defaults to 4):
+             Number of selected experts; `None` means a dense model.
+         norm_topk_prob (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the weights of the routed experts.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 202752):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 1):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_parameters (`RopeParameters`, *optional*):
+             Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+             a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+             with longer `max_position_embeddings`.
+         rope_interleave (`bool`, *optional*, defaults to `True`):
+             Whether to interleave the rotary position embeddings.
+         mlp_layer_types (`list`, *optional*):
+             MLP (MoE vs. dense) pattern for each layer.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+     >>> from modeling_glm4_moe_lite_scm import Glm4MoeLiteSCMModel
+
+     >>> # Initializing a DeepSeek-V3 style configuration
+     >>> configuration = Glm4MoeLiteSCMConfig()
+
+     >>> # Initializing a model from the configuration
+     >>> model = Glm4MoeLiteSCMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "glm4_moe_lite_scm"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     base_model_tp_plan = {
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.experts.gate_up_proj": "local_rowwise",
+         "layers.*.mlp.experts.down_proj": "local_rowwise",
+         "layers.*.mlp.experts": "gather",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+     attribute_map = {
+         "num_local_experts": "n_routed_experts",
+     }
+
+     def __init__(
+         self,
+         vocab_size: int | None = 154880,
+         hidden_size: int | None = 2048,
+         intermediate_size: int | None = 10240,
+         moe_intermediate_size: int | None = 1536,
+         num_hidden_layers: int | None = 47,
+         num_attention_heads: int | None = 20,
+         num_key_value_heads: int | None = 20,
+         n_shared_experts: int | None = 1,
+         n_routed_experts: int | None = 64,
+         routed_scaling_factor: float | None = 1.8,
+         kv_lora_rank: int | None = 512,
+         q_lora_rank: int | None = 768,
+         qk_rope_head_dim: int | None = 64,
+         v_head_dim: int | None = 256,
+         qk_nope_head_dim: int | None = 192,
+         n_group: int | None = 1,
+         topk_group: int | None = 1,
+         num_experts_per_tok: int | None = 4,
+         norm_topk_prob: bool | None = True,
+         hidden_act: str | None = "silu",
+         max_position_embeddings: int | None = 202752,
+         initializer_range: float | None = 0.02,
+         rms_norm_eps: float | None = 1e-5,
+         use_cache: bool | None = True,
+         pad_token_id: int | None = None,
+         bos_token_id: int | None = 0,
+         eos_token_id: int | None = 1,
+         pretraining_tp: int | None = 1,
+         tie_word_embeddings: bool | None = False,
+         rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
+         rope_interleave: bool | None = True,
+         mlp_layer_types=None,
+         attention_bias: bool | None = False,
+         attention_dropout: float | None = 0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+
+         # Default to MoE from the second layer on
+         self.mlp_layer_types = mlp_layer_types
+         if self.mlp_layer_types is None:
+             self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
+         layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
+
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.routed_scaling_factor = routed_scaling_factor
+         self.kv_lora_rank = kv_lora_rank
+         self.q_lora_rank = q_lora_rank
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.v_head_dim = v_head_dim
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
+         self.head_dim = qk_rope_head_dim
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.num_experts_per_tok = num_experts_per_tok
+         self.norm_topk_prob = norm_topk_prob
+         self.rope_interleave = rope_interleave
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.rope_parameters = rope_parameters
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["Glm4MoeLiteSCMConfig"]
convert_hf_to_scm.py ADDED
@@ -0,0 +1,131 @@
+ import glob
+ import re
+ import shutil
+ import sys
+
+ import accelerate
+ import torch
+ from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+ from modeling_glm4_moe_lite_scm import Glm4MoeLiteSCMForCausalLM
+ from transformers.models.glm4_moe_lite.configuration_glm4_moe_lite import Glm4MoeLiteConfig
+ from safetensors import safe_open
+
+ input_model = sys.argv[1]
+ output_model_path = sys.argv[2]
+
+ auto_map = {
+     "AutoConfig": "configuration_glm4_moe_lite_scm.Glm4MoeLiteSCMConfig",
+     "AutoModel": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMModel",
+     "AutoModelForCausalLM": "modeling_glm4_moe_lite_scm.Glm4MoeLiteSCMForCausalLM",
+ }
+
+ cfg_standard_moe = Glm4MoeLiteConfig.from_pretrained(input_model)
+ cfg_shared_moe = Glm4MoeLiteSCMConfig(
+     auto_map=auto_map,
+     n_group=cfg_standard_moe.n_group,
+     topk_group=cfg_standard_moe.topk_group,
+     n_shared_experts=cfg_standard_moe.n_shared_experts,
+     n_routed_experts=cfg_standard_moe.n_routed_experts,
+     num_experts_per_tok=cfg_standard_moe.num_experts_per_tok,
+     first_k_dense_replace=cfg_standard_moe.first_k_dense_replace,
+     vocab_size=cfg_standard_moe.vocab_size,
+     hidden_size=cfg_standard_moe.hidden_size,
+     intermediate_size=cfg_standard_moe.intermediate_size,
+     num_hidden_layers=cfg_standard_moe.num_hidden_layers,
+     num_attention_heads=cfg_standard_moe.num_attention_heads,
+     num_key_value_heads=cfg_standard_moe.num_key_value_heads,
+     hidden_act=cfg_standard_moe.hidden_act,
+     max_position_embeddings=cfg_standard_moe.max_position_embeddings,
+     initializer_range=cfg_standard_moe.initializer_range,
+     rms_norm_eps=cfg_standard_moe.rms_norm_eps,
+     tie_word_embeddings=cfg_standard_moe.tie_word_embeddings,
+     rope_parameters=cfg_standard_moe.rope_parameters,
+     rope_scaling=cfg_standard_moe.rope_scaling,
+     attention_dropout=cfg_standard_moe.attention_dropout,
+     moe_intermediate_size=cfg_standard_moe.moe_intermediate_size,
+     qk_nope_head_dim=cfg_standard_moe.qk_nope_head_dim,
+     qk_rope_head_dim=cfg_standard_moe.qk_rope_head_dim,
+     v_head_dim=cfg_standard_moe.v_head_dim,
+     partial_rotary_factor=cfg_standard_moe.partial_rotary_factor,
+     num_nextn_predict_layers=0,
+     routed_scaling_factor=cfg_standard_moe.routed_scaling_factor,
+     topk_method=cfg_standard_moe.topk_method,
+     norm_topk_prob=cfg_standard_moe.norm_topk_prob,
+     attention_bias=cfg_standard_moe.attention_bias,
+     q_lora_rank=cfg_standard_moe.q_lora_rank,
+     kv_lora_rank=cfg_standard_moe.kv_lora_rank,
+     eos_token_id=cfg_standard_moe.eos_token_id,
+     pad_token_id=cfg_standard_moe.pad_token_id,
+     torch_dtype=cfg_standard_moe.torch_dtype,
+ )
+
+ num_experts = cfg_standard_moe.n_routed_experts
+ num_hidden_layers = cfg_standard_moe.num_hidden_layers
+
+ with accelerate.init_empty_weights():
+     model_shared_moe = Glm4MoeLiteSCMForCausalLM(cfg_shared_moe)
+
+ model_shared_moe = model_shared_moe.to(torch.bfloat16)
+ new_state_dict = {}
+ pattern = f"{input_model}/model-*-of-*.safetensors"
+ files = sorted(glob.glob(pattern))
+
+ if len(files) == 0:
+     raise FileNotFoundError(f"no checkpoint shards matching {pattern}")
+ tensors = {}
+
+ for file_path in files:
+     print(f"processing {file_path}")
+     with safe_open(file_path, framework="pt", device="cpu") as f:
+         for key in f.keys():
+             tensor = f.get_tensor(key)
+             tensors[key] = tensor
+
+ for key in tensors:
+     # Skip weights for layer index num_hidden_layers (the extra next-token-prediction layer), if present.
+     if f"layers.{num_hidden_layers}" in key:
+         continue
+     if "experts" not in key or "shared_experts" in key:
+         new_state_dict[key] = tensors[key]
+     elif "experts.0" in key:
+         # Keys of expert 0 trigger building the fused tensors for the whole layer.
+         layer_num = int(re.search(r"\d+", key).group())
+         new_state_dict[f"model.layers.{layer_num}.mlp.moe_mlp.output_experts.weight"] = torch.stack(
+             [
+                 tensors[f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"]
+                 for i in range(num_experts)
+             ]
+         )
+         # Fuse up_proj and gate_proj along the output dimension, then stack all experts.
+         new_state_dict[f"model.layers.{layer_num}.mlp.moe_mlp.experts.weight"] = torch.stack(
+             [
+                 torch.cat(
+                     [
+                         tensors[f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"],
+                         tensors[f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"],
+                     ],
+                     dim=0,
+                 )
+                 for i in range(num_experts)
+             ]
+         )
+ model_shared_moe.load_state_dict(new_state_dict, strict=True, assign=True)
+ model_shared_moe.save_pretrained(output_model_path)
+ cfg_shared_moe.save_pretrained(output_model_path)
+
+
+ shutil.copy(
+     "modeling_glm4_moe_lite_scm.py",
+     output_model_path + "/" + "modeling_glm4_moe_lite_scm.py",
+ )
+ shutil.copy(
+     "configuration_glm4_moe_lite_scm.py",
+     output_model_path + "/" + "configuration_glm4_moe_lite_scm.py",
+ )
+ for i in ["tokenizer_config.json", "tokenizer.json", "chat_template.jinja"]:
+     shutil.copy(input_model + "/" + i, output_model_path + "/" + i)
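
The stacking above turns 64 per-expert `nn.Linear` weights into two batched tensors; a minimal sketch of the resulting layout, using this config's sizes (hidden_size=2048, moe_intermediate_size=1536, 64 routed experts):

```python
import torch

num_experts, hidden, inter = 64, 2048, 1536

# Per-expert HF layout: up/gate are [inter, hidden], down is [hidden, inter].
up = [torch.zeros(inter, hidden) for _ in range(num_experts)]
gate = [torch.zeros(inter, hidden) for _ in range(num_experts)]
down = [torch.zeros(hidden, inter) for _ in range(num_experts)]

# Fused SCM layout: up and gate are concatenated along the output dimension,
# then all experts are stacked into a single tensor per layer.
experts = torch.stack([torch.cat([u, g], dim=0) for u, g in zip(up, gate)])
output_experts = torch.stack(down)

print(experts.shape)         # torch.Size([64, 3072, 2048])
print(output_experts.shape)  # torch.Size([64, 2048, 1536])
```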
convert_scm_to_hf.py ADDED
@@ -0,0 +1,103 @@
+ import glob
+ import re
+ import shutil
+ import sys
+
+ import accelerate
+ import torch
+ from safetensors import safe_open
+ from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+
+ from modeling_glm4_moe_lite_for_backconvert import Glm4MoeLiteForCausalLM
+ from configuration_glm4_moe_lite_for_backconvert import Glm4MoeLiteConfig
+
+ input_model = sys.argv[1]
+ output_model_path = sys.argv[2]
+
+ cfg_shared_moe = Glm4MoeLiteSCMConfig.from_pretrained(input_model)
+ cfg_standard_moe = Glm4MoeLiteConfig(
+     n_group=cfg_shared_moe.n_group,
+     topk_group=cfg_shared_moe.topk_group,
+     n_shared_experts=cfg_shared_moe.n_shared_experts,
+     n_routed_experts=cfg_shared_moe.n_routed_experts,
+     num_experts_per_tok=cfg_shared_moe.num_experts_per_tok,
+     first_k_dense_replace=cfg_shared_moe.first_k_dense_replace,
+     vocab_size=cfg_shared_moe.vocab_size,
+     hidden_size=cfg_shared_moe.hidden_size,
+     intermediate_size=cfg_shared_moe.intermediate_size,
+     num_hidden_layers=cfg_shared_moe.num_hidden_layers,
+     num_attention_heads=cfg_shared_moe.num_attention_heads,
+     num_key_value_heads=cfg_shared_moe.num_key_value_heads,
+     hidden_act=cfg_shared_moe.hidden_act,
+     max_position_embeddings=cfg_shared_moe.max_position_embeddings,
+     initializer_range=cfg_shared_moe.initializer_range,
+     rms_norm_eps=cfg_shared_moe.rms_norm_eps,
+     tie_word_embeddings=cfg_shared_moe.tie_word_embeddings,
+     rope_parameters=cfg_shared_moe.rope_parameters,
+     rope_scaling=cfg_shared_moe.rope_scaling,
+     attention_dropout=cfg_shared_moe.attention_dropout,
+     moe_intermediate_size=cfg_shared_moe.moe_intermediate_size,
+     qk_nope_head_dim=cfg_shared_moe.qk_nope_head_dim,
+     qk_rope_head_dim=cfg_shared_moe.qk_rope_head_dim,
+     v_head_dim=cfg_shared_moe.v_head_dim,
+     partial_rotary_factor=cfg_shared_moe.partial_rotary_factor,
+     num_nextn_predict_layers=0,
+     routed_scaling_factor=cfg_shared_moe.routed_scaling_factor,
+     topk_method=cfg_shared_moe.topk_method,
+     norm_topk_prob=cfg_shared_moe.norm_topk_prob,
+     attention_bias=cfg_shared_moe.attention_bias,
+     q_lora_rank=cfg_shared_moe.q_lora_rank,
+     kv_lora_rank=cfg_shared_moe.kv_lora_rank,
+     eos_token_id=cfg_shared_moe.eos_token_id,
+     pad_token_id=cfg_shared_moe.pad_token_id,
+     torch_dtype=cfg_shared_moe.torch_dtype,
+ )
+ num_experts = cfg_standard_moe.n_shared_experts
+
+ with accelerate.init_empty_weights():
+     model_standard_moe = Glm4MoeLiteForCausalLM(cfg_standard_moe)
+
+ model_standard_moe = model_standard_moe.to(torch.bfloat16)
+ new_state_dict = {}
+ pattern = f"{input_model}/model-*-of-*.safetensors"
+ files = sorted(glob.glob(pattern))
+
+ if len(files) == 0:
+     raise FileNotFoundError(f"no checkpoint shards matching {pattern}")
+ tensors = {}
+
+ for file_path in files:
+     print(f"processing {file_path}")
+     with safe_open(file_path, framework="pt", device="cpu") as f:
+         for key in f.keys():
+             tensor = f.get_tensor(key)
+             tensors[key] = tensor
+
+ for key in tensors:
+     if "moe_mlp" not in key:
+         new_state_dict[key] = tensors[key]
+     elif "moe_mlp.output_experts" in key:
+         # Unstack the batched down projections back into per-expert weights.
+         layer_num = int(re.search(r"\d+", key).group())
+         for i, tensor in enumerate(torch.unbind(tensors[key])):
+             new_state_dict[f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"] = tensor.contiguous()
+     elif "moe_mlp.experts" in key:
+         # Split each fused [up; gate] weight back into the two separate projections.
+         layer_num = int(re.search(r"\d+", key).group())
+         for i, tensor in enumerate(torch.unbind(tensors[key])):
+             (
+                 new_state_dict[f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"],
+                 new_state_dict[f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"],
+             ) = torch.chunk(tensor, 2, dim=0)
+
+
+ model_standard_moe.load_state_dict(new_state_dict, strict=True, assign=True)
+ model_standard_moe.save_pretrained(output_model_path)
+ cfg_standard_moe.save_pretrained(output_model_path)
+
+ for i in ["tokenizer_config.json", "tokenizer.json", "chat_template.jinja"]:
+     shutil.copy(input_model + "/" + i, output_model_path + "/" + i)
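
Since the two conversion scripts are inverses, the fuse/split transform can be checked in isolation; a minimal sketch, with shapes shrunk for speed:

```python
import torch

E, inter, hidden = 4, 6, 8
up = torch.randn(E, inter, hidden)
gate = torch.randn(E, inter, hidden)

# Fuse as in convert_hf_to_scm.py: cat([up, gate]) per expert, then stack.
fused = torch.stack([torch.cat([up[i], gate[i]], dim=0) for i in range(E)])

# Split as in convert_scm_to_hf.py: unbind, then chunk back into (up, gate).
up2, gate2 = zip(*[torch.chunk(t, 2, dim=0) for t in torch.unbind(fused)])

assert torch.equal(torch.stack(up2), up) and torch.equal(torch.stack(gate2), gate)
print("round-trip OK")
```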
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": [
+     154820,
+     154827,
+     154829
+   ],
+   "output_attentions": false,
+   "output_hidden_states": false,
+   "pad_token_id": 154820,
+   "transformers_version": "5.0.0",
+   "use_cache": true
+ }
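
These defaults can be read back with `GenerationConfig`; a minimal sketch (path is a placeholder):

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./")  # placeholder path to this repo

# Any of the three ids ends generation; 154820 doubles as the pad token.
print(gen_cfg.eos_token_id)  # [154820, 154827, 154829]
print(gen_cfg.pad_token_id)  # 154820
```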
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:098d0ed1cd2b8179767c229c863f8ec71bfbc568765c24873c45969e165d3cdf
+ size 49936318984
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a1a35aabc923ea036b25e8026d5278ed305521132273eefea9c1703082153e2
+ size 9950566272
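
Both shard files above are Git LFS pointers: the actual weights are addressed by the sha256 `oid`. A minimal sketch for checking a downloaded shard against its pointer (assumes the multi-GB file is present locally):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB shards never need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

# Should print the oid recorded in the pointer above:
# 6a1a35aabc923ea036b25e8026d5278ed305521132273eefea9c1703082153e2
print(sha256_of("model-00002-of-00002.safetensors"))
```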
model.safetensors.index.json ADDED
@@ -0,0 +1,759 @@
+ {
+   "metadata": {
+     "total_parameters": 29943390976,
+     "total_size": 59886793728
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00002-of-00002.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
205
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
206
+ "model.layers.2.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
207
+ "model.layers.2.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
208
+ "model.layers.2.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
209
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
210
+ "model.layers.2.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
211
+ "model.layers.2.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
212
+ "model.layers.2.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
213
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
214
+ "model.layers.20.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
215
+ "model.layers.20.mlp.gate.weight": "model-00001-of-00002.safetensors",
216
+ "model.layers.20.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
217
+ "model.layers.20.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
218
+ "model.layers.20.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
219
+ "model.layers.20.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
220
+ "model.layers.20.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
221
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
222
+ "model.layers.20.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
223
+ "model.layers.20.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
224
+ "model.layers.20.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
225
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
226
+ "model.layers.20.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
227
+ "model.layers.20.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
228
+ "model.layers.20.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
229
+ "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
230
+ "model.layers.21.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
231
+ "model.layers.21.mlp.gate.weight": "model-00001-of-00002.safetensors",
232
+ "model.layers.21.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
233
+ "model.layers.21.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
234
+ "model.layers.21.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
235
+ "model.layers.21.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
236
+ "model.layers.21.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
237
+ "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
238
+ "model.layers.21.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
239
+ "model.layers.21.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
240
+ "model.layers.21.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
241
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
242
+ "model.layers.21.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
243
+ "model.layers.21.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
244
+ "model.layers.21.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
245
+ "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
246
+ "model.layers.22.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
247
+ "model.layers.22.mlp.gate.weight": "model-00001-of-00002.safetensors",
248
+ "model.layers.22.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
249
+ "model.layers.22.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
250
+ "model.layers.22.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
251
+ "model.layers.22.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
252
+ "model.layers.22.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
253
+ "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
254
+ "model.layers.22.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
255
+ "model.layers.22.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
256
+ "model.layers.22.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
257
+ "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
258
+ "model.layers.22.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
259
+ "model.layers.22.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
260
+ "model.layers.22.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
261
+ "model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
262
+ "model.layers.23.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
263
+ "model.layers.23.mlp.gate.weight": "model-00001-of-00002.safetensors",
264
+ "model.layers.23.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
265
+ "model.layers.23.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
266
+ "model.layers.23.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
267
+ "model.layers.23.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
268
+ "model.layers.23.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
269
+ "model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
270
+ "model.layers.23.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
271
+ "model.layers.23.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
272
+ "model.layers.23.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
273
+ "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
274
+ "model.layers.23.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
275
+ "model.layers.23.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
276
+ "model.layers.23.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
277
+ "model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
278
+ "model.layers.24.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
279
+ "model.layers.24.mlp.gate.weight": "model-00001-of-00002.safetensors",
280
+ "model.layers.24.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
281
+ "model.layers.24.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
282
+ "model.layers.24.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
283
+ "model.layers.24.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
284
+ "model.layers.24.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
285
+ "model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
286
+ "model.layers.24.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
287
+ "model.layers.24.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
288
+ "model.layers.24.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
289
+ "model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
290
+ "model.layers.24.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
291
+ "model.layers.24.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
292
+ "model.layers.24.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
293
+ "model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
294
+ "model.layers.25.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
295
+ "model.layers.25.mlp.gate.weight": "model-00001-of-00002.safetensors",
296
+ "model.layers.25.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
297
+ "model.layers.25.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
298
+ "model.layers.25.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
299
+ "model.layers.25.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
300
+ "model.layers.25.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
301
+ "model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
302
+ "model.layers.25.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
303
+ "model.layers.25.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
304
+ "model.layers.25.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
305
+ "model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
306
+ "model.layers.25.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
307
+ "model.layers.25.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
308
+ "model.layers.25.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
309
+ "model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
310
+ "model.layers.26.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
311
+ "model.layers.26.mlp.gate.weight": "model-00001-of-00002.safetensors",
312
+ "model.layers.26.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
313
+ "model.layers.26.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
314
+ "model.layers.26.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
315
+ "model.layers.26.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
316
+ "model.layers.26.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
317
+ "model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
318
+ "model.layers.26.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
319
+ "model.layers.26.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
320
+ "model.layers.26.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
321
+ "model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
322
+ "model.layers.26.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
323
+ "model.layers.26.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
324
+ "model.layers.26.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
325
+ "model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors",
326
+ "model.layers.27.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
327
+ "model.layers.27.mlp.gate.weight": "model-00001-of-00002.safetensors",
328
+ "model.layers.27.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
329
+ "model.layers.27.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
330
+ "model.layers.27.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
331
+ "model.layers.27.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
332
+ "model.layers.27.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
333
+ "model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
334
+ "model.layers.27.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
335
+ "model.layers.27.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
336
+ "model.layers.27.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
337
+ "model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
338
+ "model.layers.27.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
339
+ "model.layers.27.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
340
+ "model.layers.27.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
341
+ "model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors",
342
+ "model.layers.28.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
343
+ "model.layers.28.mlp.gate.weight": "model-00001-of-00002.safetensors",
344
+ "model.layers.28.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
345
+ "model.layers.28.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
346
+ "model.layers.28.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
347
+ "model.layers.28.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
348
+ "model.layers.28.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
349
+ "model.layers.28.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
350
+ "model.layers.28.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
351
+ "model.layers.28.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
352
+ "model.layers.28.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
353
+ "model.layers.28.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
354
+ "model.layers.28.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
355
+ "model.layers.28.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
356
+ "model.layers.28.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
357
+ "model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors",
358
+ "model.layers.29.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
359
+ "model.layers.29.mlp.gate.weight": "model-00001-of-00002.safetensors",
360
+ "model.layers.29.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
361
+ "model.layers.29.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
362
+ "model.layers.29.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
363
+ "model.layers.29.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
364
+ "model.layers.29.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
365
+ "model.layers.29.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
366
+ "model.layers.29.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
367
+ "model.layers.29.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
368
+ "model.layers.29.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
369
+ "model.layers.29.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
370
+ "model.layers.29.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
371
+ "model.layers.29.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
372
+ "model.layers.29.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
373
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
374
+ "model.layers.3.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
375
+ "model.layers.3.mlp.gate.weight": "model-00001-of-00002.safetensors",
376
+ "model.layers.3.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
377
+ "model.layers.3.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
378
+ "model.layers.3.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
379
+ "model.layers.3.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
380
+ "model.layers.3.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
381
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
382
+ "model.layers.3.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
383
+ "model.layers.3.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
384
+ "model.layers.3.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
385
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
386
+ "model.layers.3.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
387
+ "model.layers.3.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
388
+ "model.layers.3.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
389
+ "model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors",
390
+ "model.layers.30.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
391
+ "model.layers.30.mlp.gate.weight": "model-00001-of-00002.safetensors",
392
+ "model.layers.30.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
393
+ "model.layers.30.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
394
+ "model.layers.30.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
395
+ "model.layers.30.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
396
+ "model.layers.30.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
397
+ "model.layers.30.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
398
+ "model.layers.30.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
399
+ "model.layers.30.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
400
+ "model.layers.30.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
401
+ "model.layers.30.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
402
+ "model.layers.30.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
403
+ "model.layers.30.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
404
+ "model.layers.30.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
405
+ "model.layers.31.input_layernorm.weight": "model-00001-of-00002.safetensors",
406
+ "model.layers.31.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
407
+ "model.layers.31.mlp.gate.weight": "model-00001-of-00002.safetensors",
408
+ "model.layers.31.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
409
+ "model.layers.31.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
410
+ "model.layers.31.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
411
+ "model.layers.31.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
412
+ "model.layers.31.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
413
+ "model.layers.31.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
414
+ "model.layers.31.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
415
+ "model.layers.31.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
416
+ "model.layers.31.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
417
+ "model.layers.31.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
418
+ "model.layers.31.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
419
+ "model.layers.31.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
420
+ "model.layers.31.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
421
+ "model.layers.32.input_layernorm.weight": "model-00001-of-00002.safetensors",
422
+ "model.layers.32.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
423
+ "model.layers.32.mlp.gate.weight": "model-00001-of-00002.safetensors",
424
+ "model.layers.32.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
425
+ "model.layers.32.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
426
+ "model.layers.32.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
427
+ "model.layers.32.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
428
+ "model.layers.32.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
429
+ "model.layers.32.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
430
+ "model.layers.32.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
431
+ "model.layers.32.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
432
+ "model.layers.32.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
433
+ "model.layers.32.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
434
+ "model.layers.32.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
435
+ "model.layers.32.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
436
+ "model.layers.32.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
437
+ "model.layers.33.input_layernorm.weight": "model-00001-of-00002.safetensors",
438
+ "model.layers.33.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
439
+ "model.layers.33.mlp.gate.weight": "model-00001-of-00002.safetensors",
440
+ "model.layers.33.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
441
+ "model.layers.33.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
442
+ "model.layers.33.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
443
+ "model.layers.33.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
444
+ "model.layers.33.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
445
+ "model.layers.33.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
446
+ "model.layers.33.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
447
+ "model.layers.33.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
448
+ "model.layers.33.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
449
+ "model.layers.33.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
450
+ "model.layers.33.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
451
+ "model.layers.33.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
452
+ "model.layers.33.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
453
+ "model.layers.34.input_layernorm.weight": "model-00001-of-00002.safetensors",
454
+ "model.layers.34.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
455
+ "model.layers.34.mlp.gate.weight": "model-00001-of-00002.safetensors",
456
+ "model.layers.34.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
457
+ "model.layers.34.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
458
+ "model.layers.34.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
459
+ "model.layers.34.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
460
+ "model.layers.34.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
461
+ "model.layers.34.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
462
+ "model.layers.34.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
463
+ "model.layers.34.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
464
+ "model.layers.34.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
465
+ "model.layers.34.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
466
+ "model.layers.34.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
467
+ "model.layers.34.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
468
+ "model.layers.34.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
469
+ "model.layers.35.input_layernorm.weight": "model-00001-of-00002.safetensors",
470
+ "model.layers.35.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
471
+ "model.layers.35.mlp.gate.weight": "model-00001-of-00002.safetensors",
472
+ "model.layers.35.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
473
+ "model.layers.35.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
474
+ "model.layers.35.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
475
+ "model.layers.35.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
476
+ "model.layers.35.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
477
+ "model.layers.35.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
478
+ "model.layers.35.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
479
+ "model.layers.35.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
480
+ "model.layers.35.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
481
+ "model.layers.35.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
482
+ "model.layers.35.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
483
+ "model.layers.35.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
484
+ "model.layers.35.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
485
+ "model.layers.36.input_layernorm.weight": "model-00001-of-00002.safetensors",
486
+ "model.layers.36.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
487
+ "model.layers.36.mlp.gate.weight": "model-00001-of-00002.safetensors",
488
+ "model.layers.36.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
489
+ "model.layers.36.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
490
+ "model.layers.36.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
491
+ "model.layers.36.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
492
+ "model.layers.36.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
493
+ "model.layers.36.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
494
+ "model.layers.36.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
495
+ "model.layers.36.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
496
+ "model.layers.36.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
497
+ "model.layers.36.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
498
+ "model.layers.36.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
499
+ "model.layers.36.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
500
+ "model.layers.36.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
501
+ "model.layers.37.input_layernorm.weight": "model-00001-of-00002.safetensors",
502
+ "model.layers.37.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
503
+ "model.layers.37.mlp.gate.weight": "model-00001-of-00002.safetensors",
504
+ "model.layers.37.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
505
+ "model.layers.37.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
506
+ "model.layers.37.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
507
+ "model.layers.37.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
508
+ "model.layers.37.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
509
+ "model.layers.37.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
510
+ "model.layers.37.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
511
+ "model.layers.37.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
512
+ "model.layers.37.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
513
+ "model.layers.37.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
514
+ "model.layers.37.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
515
+ "model.layers.37.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
516
+ "model.layers.37.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
517
+ "model.layers.38.input_layernorm.weight": "model-00001-of-00002.safetensors",
518
+ "model.layers.38.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
519
+ "model.layers.38.mlp.gate.weight": "model-00001-of-00002.safetensors",
520
+ "model.layers.38.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
521
+ "model.layers.38.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
522
+ "model.layers.38.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
523
+ "model.layers.38.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
524
+ "model.layers.38.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
525
+ "model.layers.38.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
526
+ "model.layers.38.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
527
+ "model.layers.38.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
528
+ "model.layers.38.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
529
+ "model.layers.38.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
530
+ "model.layers.38.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
531
+ "model.layers.38.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
532
+ "model.layers.38.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
533
+ "model.layers.39.input_layernorm.weight": "model-00002-of-00002.safetensors",
534
+ "model.layers.39.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
535
+ "model.layers.39.mlp.gate.weight": "model-00002-of-00002.safetensors",
536
+ "model.layers.39.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
537
+ "model.layers.39.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
538
+ "model.layers.39.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
539
+ "model.layers.39.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
540
+ "model.layers.39.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
541
+ "model.layers.39.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
542
+ "model.layers.39.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
543
+ "model.layers.39.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
544
+ "model.layers.39.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
545
+ "model.layers.39.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
546
+ "model.layers.39.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
547
+ "model.layers.39.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
548
+ "model.layers.39.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
549
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
550
+ "model.layers.4.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
551
+ "model.layers.4.mlp.gate.weight": "model-00001-of-00002.safetensors",
552
+ "model.layers.4.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
553
+ "model.layers.4.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
554
+ "model.layers.4.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
555
+ "model.layers.4.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
556
+ "model.layers.4.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
557
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
558
+ "model.layers.4.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
559
+ "model.layers.4.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
560
+ "model.layers.4.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
561
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
562
+ "model.layers.4.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
563
+ "model.layers.4.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
564
+ "model.layers.4.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
565
+ "model.layers.40.input_layernorm.weight": "model-00002-of-00002.safetensors",
566
+ "model.layers.40.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
567
+ "model.layers.40.mlp.gate.weight": "model-00002-of-00002.safetensors",
568
+ "model.layers.40.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
569
+ "model.layers.40.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
570
+ "model.layers.40.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
571
+ "model.layers.40.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
572
+ "model.layers.40.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
573
+ "model.layers.40.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
574
+ "model.layers.40.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
575
+ "model.layers.40.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
576
+ "model.layers.40.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
577
+ "model.layers.40.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
578
+ "model.layers.40.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
579
+ "model.layers.40.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
580
+ "model.layers.40.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
581
+ "model.layers.41.input_layernorm.weight": "model-00002-of-00002.safetensors",
582
+ "model.layers.41.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
583
+ "model.layers.41.mlp.gate.weight": "model-00002-of-00002.safetensors",
584
+ "model.layers.41.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
585
+ "model.layers.41.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
586
+ "model.layers.41.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
587
+ "model.layers.41.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
588
+ "model.layers.41.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
589
+ "model.layers.41.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
590
+ "model.layers.41.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
591
+ "model.layers.41.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
592
+ "model.layers.41.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
593
+ "model.layers.41.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
594
+ "model.layers.41.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
595
+ "model.layers.41.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
596
+ "model.layers.41.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
597
+ "model.layers.42.input_layernorm.weight": "model-00002-of-00002.safetensors",
598
+ "model.layers.42.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
599
+ "model.layers.42.mlp.gate.weight": "model-00002-of-00002.safetensors",
600
+ "model.layers.42.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
601
+ "model.layers.42.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
602
+ "model.layers.42.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
603
+ "model.layers.42.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
604
+ "model.layers.42.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
605
+ "model.layers.42.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
606
+ "model.layers.42.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
607
+ "model.layers.42.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
608
+ "model.layers.42.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
609
+ "model.layers.42.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
610
+ "model.layers.42.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
611
+ "model.layers.42.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
612
+ "model.layers.42.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
613
+ "model.layers.43.input_layernorm.weight": "model-00002-of-00002.safetensors",
614
+ "model.layers.43.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
615
+ "model.layers.43.mlp.gate.weight": "model-00002-of-00002.safetensors",
616
+ "model.layers.43.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
617
+ "model.layers.43.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
618
+ "model.layers.43.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
619
+ "model.layers.43.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
620
+ "model.layers.43.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
621
+ "model.layers.43.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
622
+ "model.layers.43.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
623
+ "model.layers.43.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
624
+ "model.layers.43.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
625
+ "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
626
+ "model.layers.43.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
627
+ "model.layers.43.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
628
+ "model.layers.43.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
629
+ "model.layers.44.input_layernorm.weight": "model-00002-of-00002.safetensors",
630
+ "model.layers.44.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
631
+ "model.layers.44.mlp.gate.weight": "model-00002-of-00002.safetensors",
632
+ "model.layers.44.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
633
+ "model.layers.44.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
634
+ "model.layers.44.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
635
+ "model.layers.44.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
636
+ "model.layers.44.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
637
+ "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
638
+ "model.layers.44.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
639
+ "model.layers.44.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
640
+ "model.layers.44.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
641
+ "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
642
+ "model.layers.44.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
643
+ "model.layers.44.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
644
+ "model.layers.44.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
645
+ "model.layers.45.input_layernorm.weight": "model-00002-of-00002.safetensors",
646
+ "model.layers.45.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
647
+ "model.layers.45.mlp.gate.weight": "model-00002-of-00002.safetensors",
648
+ "model.layers.45.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
649
+ "model.layers.45.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
650
+ "model.layers.45.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
651
+ "model.layers.45.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
652
+ "model.layers.45.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
653
+ "model.layers.45.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
654
+ "model.layers.45.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
655
+ "model.layers.45.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
656
+ "model.layers.45.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
657
+ "model.layers.45.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
658
+ "model.layers.45.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
659
+ "model.layers.45.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
660
+ "model.layers.45.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
661
+ "model.layers.46.input_layernorm.weight": "model-00002-of-00002.safetensors",
662
+ "model.layers.46.mlp.gate.e_score_correction_bias": "model-00002-of-00002.safetensors",
663
+ "model.layers.46.mlp.gate.weight": "model-00002-of-00002.safetensors",
664
+ "model.layers.46.mlp.moe_mlp.experts.weight": "model-00002-of-00002.safetensors",
665
+ "model.layers.46.mlp.moe_mlp.output_experts.weight": "model-00002-of-00002.safetensors",
666
+ "model.layers.46.mlp.shared_experts.down_proj.weight": "model-00002-of-00002.safetensors",
667
+ "model.layers.46.mlp.shared_experts.gate_proj.weight": "model-00002-of-00002.safetensors",
668
+ "model.layers.46.mlp.shared_experts.up_proj.weight": "model-00002-of-00002.safetensors",
669
+ "model.layers.46.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
670
+ "model.layers.46.self_attn.kv_a_layernorm.weight": "model-00002-of-00002.safetensors",
671
+ "model.layers.46.self_attn.kv_a_proj_with_mqa.weight": "model-00002-of-00002.safetensors",
672
+ "model.layers.46.self_attn.kv_b_proj.weight": "model-00002-of-00002.safetensors",
673
+ "model.layers.46.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
674
+ "model.layers.46.self_attn.q_a_layernorm.weight": "model-00002-of-00002.safetensors",
675
+ "model.layers.46.self_attn.q_a_proj.weight": "model-00002-of-00002.safetensors",
676
+ "model.layers.46.self_attn.q_b_proj.weight": "model-00002-of-00002.safetensors",
677
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
678
+ "model.layers.5.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
679
+ "model.layers.5.mlp.gate.weight": "model-00001-of-00002.safetensors",
680
+ "model.layers.5.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
681
+ "model.layers.5.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
682
+ "model.layers.5.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
683
+ "model.layers.5.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
684
+ "model.layers.5.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
685
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
686
+ "model.layers.5.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
687
+ "model.layers.5.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
688
+ "model.layers.5.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
689
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
690
+ "model.layers.5.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
691
+ "model.layers.5.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
692
+ "model.layers.5.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
693
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
694
+ "model.layers.6.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
695
+ "model.layers.6.mlp.gate.weight": "model-00001-of-00002.safetensors",
696
+ "model.layers.6.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
697
+ "model.layers.6.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
698
+ "model.layers.6.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
699
+ "model.layers.6.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
700
+ "model.layers.6.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
701
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
702
+ "model.layers.6.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
703
+ "model.layers.6.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
704
+ "model.layers.6.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
705
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
706
+ "model.layers.6.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
707
+ "model.layers.6.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
708
+ "model.layers.6.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
709
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
710
+ "model.layers.7.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
711
+ "model.layers.7.mlp.gate.weight": "model-00001-of-00002.safetensors",
712
+ "model.layers.7.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
713
+ "model.layers.7.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
714
+ "model.layers.7.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
715
+ "model.layers.7.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
716
+ "model.layers.7.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
717
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
718
+ "model.layers.7.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
719
+ "model.layers.7.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
720
+ "model.layers.7.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
721
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
722
+ "model.layers.7.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
723
+ "model.layers.7.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
724
+ "model.layers.7.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
725
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
726
+ "model.layers.8.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
727
+ "model.layers.8.mlp.gate.weight": "model-00001-of-00002.safetensors",
728
+ "model.layers.8.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
729
+ "model.layers.8.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
730
+ "model.layers.8.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
731
+ "model.layers.8.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
732
+ "model.layers.8.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
733
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
734
+ "model.layers.8.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
735
+ "model.layers.8.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
736
+ "model.layers.8.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
737
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
738
+ "model.layers.8.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
739
+ "model.layers.8.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
740
+ "model.layers.8.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
741
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
742
+ "model.layers.9.mlp.gate.e_score_correction_bias": "model-00001-of-00002.safetensors",
743
+ "model.layers.9.mlp.gate.weight": "model-00001-of-00002.safetensors",
744
+ "model.layers.9.mlp.moe_mlp.experts.weight": "model-00001-of-00002.safetensors",
745
+ "model.layers.9.mlp.moe_mlp.output_experts.weight": "model-00001-of-00002.safetensors",
746
+ "model.layers.9.mlp.shared_experts.down_proj.weight": "model-00001-of-00002.safetensors",
747
+ "model.layers.9.mlp.shared_experts.gate_proj.weight": "model-00001-of-00002.safetensors",
748
+ "model.layers.9.mlp.shared_experts.up_proj.weight": "model-00001-of-00002.safetensors",
749
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
750
+ "model.layers.9.self_attn.kv_a_layernorm.weight": "model-00001-of-00002.safetensors",
751
+ "model.layers.9.self_attn.kv_a_proj_with_mqa.weight": "model-00001-of-00002.safetensors",
752
+ "model.layers.9.self_attn.kv_b_proj.weight": "model-00001-of-00002.safetensors",
753
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
754
+ "model.layers.9.self_attn.q_a_layernorm.weight": "model-00001-of-00002.safetensors",
755
+ "model.layers.9.self_attn.q_a_proj.weight": "model-00001-of-00002.safetensors",
756
+ "model.layers.9.self_attn.q_b_proj.weight": "model-00001-of-00002.safetensors",
757
+ "model.norm.weight": "model-00002-of-00002.safetensors"
758
+ }
759
+ }
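Note: the `weight_map` above follows the standard sharded-safetensors index layout, mapping each tensor name to the shard file that stores it. A minimal lookup sketch (assuming the `safetensors` package is installed and the shards sit next to the index; the tensor name is just one entry from the map above):

import json
from safetensors import safe_open

# Read the index and resolve which shard holds a given tensor.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.40.self_attn.o_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)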
modeling_glm4_moe_lite_for_backconvert.py ADDED
@@ -0,0 +1,743 @@
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import math
+ from collections.abc import Callable
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import initialization as init
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernel_func_from_hub
+ from transformers.masking_utils import create_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, is_grouped_mm_available
+ from transformers.utils.generic import check_model_inputs, is_flash_attention_requested, maybe_autocast
+
39
+ try:
40
+ from .configuration_glm4_moe_lite_for_backconvert import Glm4MoeLiteConfig
41
+ except:
42
+ from configuration_glm4_moe_lite_for_backconvert import Glm4MoeLiteConfig
43
+
44
+
+ class Glm4MoeLiteRotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: Glm4MoeLiteConfig, device=None):
+         super().__init__()
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+
+         self.rope_type = self.config.rope_parameters["rope_type"]
+         rope_init_fn: Callable = self.compute_default_rope_parameters
+         if self.rope_type != "default":
+             rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+         inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
+
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
+
+     @staticmethod
+     def compute_default_rope_parameters(
+         config: Glm4MoeLiteConfig | None = None,
+         device: Optional["torch.device"] = None,
+         seq_len: int | None = None,
+     ) -> tuple["torch.Tensor", float]:
+         """
+         Computes the inverse frequencies according to the original RoPE implementation.
+         Args:
+             config ([`~transformers.PreTrainedConfig`]):
+                 The model configuration.
+             device (`torch.device`):
+                 The device to use for initialization of the inverse frequencies.
+             seq_len (`int`, *optional*):
+                 The current sequence length. Unused for this type of RoPE.
+         Returns:
+             Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
+             post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
+         """
+         base = config.rope_parameters["rope_theta"]
+         partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
+         head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+         dim = int(head_dim * partial_rotary_factor)
+
+         attention_factor = 1.0  # Unused in this type of RoPE
+
+         # Compute the inverse frequencies
+         inv_freq = 1.0 / (
+             base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
+         )
+         return inv_freq, attention_factor
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
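+ # With the half-split layout above, rotate_half([x0, x1, x2, x3]) == [-x2, -x3, x0, x1], so
+ # (x * cos) + (rotate_half(x) * sin) rotates each (x_j, x_{j + d/2}) pair by its position angle.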
+
+
+ @use_kernel_func_from_hub("rotary_pos_emb")
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
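+ # Shape reference for the default call: q, k are [batch, heads, seq, head_dim] and cos/sin come in
+ # as [batch, seq, head_dim], so unsqueeze_dim=1 inserts the broadcast head axis.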
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: torch.Tensor | None,
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs: Unpack[TransformersKwargs],
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
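+ # Eager fallback reference: logits = q @ k^T * scaling -> [batch, heads, q_len, kv_len]; softmax is
+ # computed in float32 for stability, and the output comes back as [batch, q_len, heads, head_dim].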
+
+
+ def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     r"""
+     TODO let's just use the original freqs computation to not have the view
+     transpose + reshape! This is not optimized!
+     Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`):
+             The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+             used to pass offsetted position ids when working with a KV-cache.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+
+     b, h, s, d = q.shape
+     q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     b, h, s, d = k.shape
+     k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
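+ # The view/transpose/reshape above converts interleaved rotary pairs to the half-split layout that
+ # rotate_half expects: with d=4, [x0, x1, x2, x3] -> [x0, x2, x1, x3].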
+
+
+ def yarn_get_mscale(scale=1, mscale=1):
+     if scale <= 1:
+         return 1.0
+     return 0.1 * mscale * math.log(scale) + 1.0
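+ # Worked example: scale=4, mscale=1 gives 0.1 * ln(4) + 1 ~ 1.139, a mild logarithmic boost to the
+ # attention scale used by YaRN-style context extension.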
+
+
+ class Glm4MoeLiteAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: Glm4MoeLiteConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.attention_dropout = config.attention_dropout
+         self.num_heads = config.num_attention_heads
+
+         self.q_lora_rank = config.q_lora_rank
+         self.qk_rope_head_dim = config.qk_rope_head_dim
+         self.kv_lora_rank = config.kv_lora_rank
+         self.v_head_dim = config.v_head_dim
+         self.qk_nope_head_dim = config.qk_nope_head_dim
+         self.qk_head_dim = config.qk_head_dim
+
+         self.is_causal = True
+         if self.q_lora_rank is None:
+             self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
+         else:
+             self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
+             self.q_a_layernorm = Glm4MoeLiteRMSNorm(config.q_lora_rank)
+             self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
+
+         self.kv_a_proj_with_mqa = nn.Linear(
+             config.hidden_size,
+             self.kv_lora_rank + self.qk_rope_head_dim,
+             bias=config.attention_bias,
+         )
+         self.kv_a_layernorm = Glm4MoeLiteRMSNorm(self.kv_lora_rank)
+         self.kv_b_proj = nn.Linear(
+             self.kv_lora_rank,
+             self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
+             bias=False,
+         )
+
+         self.o_proj = nn.Linear(
+             self.num_heads * self.v_head_dim,
+             config.hidden_size,
+             bias=config.attention_bias,
+         )
+
+         self.scaling = self.qk_head_dim ** (-0.5)
+         if self.config.rope_parameters.get("rope_type", "default") != "default":
+             mscale_all_dim = self.config.rope_parameters.get("mscale_all_dim", 0)
+             scaling_factor = self.config.rope_parameters["factor"]
+             if mscale_all_dim:
+                 mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
+                 self.scaling = self.scaling * mscale * mscale
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: torch.Tensor | None,
+         past_key_values: Cache | None = None,
+         cache_position: torch.LongTensor | None = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
+         batch_size, seq_length = hidden_states.shape[:-1]
+         query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
+         key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
+
+         if self.q_lora_rank is None:
+             q_states = self.q_proj(hidden_states)
+         else:
+             q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
+         q_states = q_states.view(query_shape).transpose(1, 2)
+         q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+
+         compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
+         k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+
+         k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2)
+         k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+
+         k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
+
+         cos, sin = position_embeddings
+         if self.config.rope_interleave:  # support using interleaved weights for efficiency
+             q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
+         else:
+             q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
+         k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
+
+         query_states = torch.cat((q_pass, q_rot), dim=-1)
+         key_states = torch.cat((k_pass, k_rot), dim=-1)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             attn_output = attn_output[:, :, :, : self.v_head_dim]
+
+         attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
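+ # DeepSeek-style multi-head latent attention: queries optionally pass through a low-rank bottleneck
+ # (q_lora_rank), keys/values are decompressed from kv_lora_rank, and a single rope key (k_rot) is
+ # shared across heads. The flash-attention padding above works around qk_head_dim != v_head_dim,
+ # which those kernels expect to match.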
+
+
+ class Glm4MoeLiteMLP(nn.Module):
+     def __init__(self, config, intermediate_size=None):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
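+ # SwiGLU-style feed-forward: y = down_proj(act_fn(gate_proj(x)) * up_proj(x)).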
+
+
+ class Glm4MoeLiteTopkRouter(nn.Module):
+     def __init__(self, config: Glm4MoeLiteConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.num_experts_per_tok
+         self.n_routed_experts = config.n_routed_experts
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+
+         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
+         self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
+
+     def forward(self, hidden_states):
+         hidden_states = hidden_states.view(-1, self.config.hidden_size)
+         router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
+         return router_logits
+
+
+ @use_kernel_forward_from_hub("RMSNorm")
+ class Glm4MoeLiteRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         Glm4MoeLiteRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ # @use_experts_implementation
+ # class Glm4MoeLiteNaiveMoe(nn.Module):
+ #     """Collection of expert weights stored as 3D tensors."""
+
+ #     def __init__(self, config):
+ #         super().__init__()
+ #         self.num_experts = config.num_local_experts
+ #         self.hidden_dim = config.hidden_size
+ #         self.intermediate_dim = config.moe_intermediate_size
+ #         self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
+ #         self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
+ #         self.act_fn = ACT2FN[config.hidden_act]
+
+ #     def forward(
+ #         self,
+ #         hidden_states: torch.Tensor,
+ #         top_k_index: torch.Tensor,
+ #         top_k_weights: torch.Tensor,
+ #     ) -> torch.Tensor:
+ #         final_hidden_states = torch.zeros_like(hidden_states)
+ #         with torch.no_grad():
+ #             expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
+ #             expert_mask = expert_mask.permute(2, 1, 0)
+ #             expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
+
+ #         for expert_idx in expert_hit:
+ #             expert_idx = expert_idx[0]
+ #             if expert_idx == self.num_experts:
+ #                 continue
+ #             top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
+ #             current_state = hidden_states[token_idx]
+ #             gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
+ #             current_hidden_states = self.act_fn(gate) * up
+ #             current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
+ #             current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
+ #             final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
+
+ #         return final_hidden_states
+
+
+ class Glm4MoeLiteMoE(nn.Module):
+     """
+     A mixed expert module containing shared experts.
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         # self.experts = Glm4MoeLiteNaiveMoe(config)
+         self.experts = nn.ModuleList(
+             [Glm4MoeLiteMLP(
+                 config, intermediate_size=config.moe_intermediate_size
+             ) for _ in range(config.n_routed_experts)]
+         )
+         self.gate = Glm4MoeLiteTopkRouter(config)
+         self.shared_experts = Glm4MoeLiteMLP(
+             config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts
+         )
+         self.n_routed_experts = config.n_routed_experts
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.top_k = config.num_experts_per_tok
+
+     def route_tokens_to_experts(self, router_logits):
+         router_logits = router_logits.sigmoid()
+         router_logits_for_choice = router_logits + self.gate.e_score_correction_bias
+         group_scores = (
+             router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .topk(2, dim=-1)[0]
+             .sum(dim=-1)
+         )
+         group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
+         group_mask = torch.zeros_like(group_scores)
+         group_mask.scatter_(1, group_idx, 1)
+         score_mask = (
+             group_mask.unsqueeze(-1)
+             .expand(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .reshape(-1, self.n_routed_experts)
+         )
+         scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
+         topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
+         topk_weights = router_logits.gather(1, topk_indices)
+         if self.norm_topk_prob:
+             denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
+             topk_weights /= denominator
+         topk_weights = topk_weights * self.routed_scaling_factor
+         return topk_indices, topk_weights
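+     # Routing recap: sigmoid scores plus e_score_correction_bias are used only for *selection*
+     # (grouped top-2 sums pick topk_group groups, then top_k experts within them), while the returned
+     # weights come from the unbiased sigmoid scores, optionally normalized, then scaled by
+     # routed_scaling_factor.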
+
+     def forward(self, hidden_states):
+         residuals = hidden_states
+         orig_shape = hidden_states.shape
+         router_logits = self.gate(hidden_states)
+         topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
+         hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+         # `self.experts` is a plain nn.ModuleList (not callable), so dispatch tokens per expert here
+         final_hidden_states = torch.zeros_like(hidden_states)
+         for expert_idx in topk_indices.unique():
+             token_idx, topk_pos = torch.where(topk_indices == expert_idx)
+             expert_out = self.experts[int(expert_idx)](hidden_states[token_idx])
+             weighted = expert_out * topk_weights[token_idx, topk_pos, None].to(expert_out.dtype)
+             final_hidden_states.index_add_(0, token_idx, weighted.to(final_hidden_states.dtype))
+         hidden_states = final_hidden_states.view(*orig_shape)
+         hidden_states = hidden_states + self.shared_experts(residuals)
+         return hidden_states
+
+
+ class Glm4MoeLiteDecoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: Glm4MoeLiteConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = Glm4MoeLiteAttention(config, layer_idx)
+
+         if config.mlp_layer_types[layer_idx] == "sparse":
+             self.mlp = Glm4MoeLiteMoE(config)
+         else:
+             self.mlp = Glm4MoeLiteMLP(config)
+
+         self.input_layernorm = Glm4MoeLiteRMSNorm(config.hidden_size, config.rms_norm_eps)
+         self.post_attention_layernorm = Glm4MoeLiteRMSNorm(config.hidden_size, config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         use_cache: bool | None = False,
+         cache_position: torch.LongTensor | None = None,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> torch.Tensor:
+         residual = hidden_states
+         hidden_states = self.input_layernorm(hidden_states)
+         # Self Attention
+         hidden_states, _ = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+         return hidden_states
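+ # Standard pre-norm residual layout: x = x + Attn(RMSNorm(x)), then x = x + MLP(RMSNorm(x)).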
+
+
+ @auto_docstring
+ class Glm4MoeLitePreTrainedModel(PreTrainedModel):
+     config: Glm4MoeLiteConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["Glm4MoeLiteDecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _can_compile_fullgraph = (
+         is_grouped_mm_available()
+     )  # https://huggingface.co/docs/transformers/experts_interface#torchcompile
+     _supports_attention_backend = True
+     _can_record_outputs = {
+         "hidden_states": Glm4MoeLiteDecoderLayer,
+         "attentions": Glm4MoeLiteAttention,
+     }
+     _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
+
+     @torch.no_grad()
+     def _init_weights(self, module):
+         super()._init_weights(module)
+         if isinstance(module, Glm4MoeLiteTopkRouter):
+             init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             init.zeros_(module.e_score_correction_bias)
+         # elif isinstance(module, Glm4MoeLiteNaiveMoe):
+         #     init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
+         #     init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
+
+
+ @auto_docstring
+ class Glm4MoeLiteModel(Glm4MoeLitePreTrainedModel):
+     _keys_to_ignore_on_load_unexpected = [r"model\.layers\.47.*"]
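+     # layer index 47 exists in the source checkpoint but not in this model, so it is dropped on load
+     # (presumably an extra head, e.g. for multi-token prediction, that this conversion does not use)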
+
+     def __init__(self, config: Glm4MoeLiteConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [Glm4MoeLiteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = Glm4MoeLiteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Glm4MoeLiteRotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @check_model_inputs
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         cache_position: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> BaseModelOutputWithPast:
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if inputs_embeds is None:
+             inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache(config=self.config)
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position: torch.Tensor = (
+                 torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = create_causal_mask(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=position_ids,
+         )
+
+         hidden_states = inputs_embeds
+         position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             hidden_states = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_embeddings=position_embeddings,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 **kwargs,
+             )
+
+         hidden_states = self.norm(hidden_states)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+
+ @auto_docstring
+ class Glm4MoeLiteForCausalLM(Glm4MoeLitePreTrainedModel, GenerationMixin):
+     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = Glm4MoeLiteModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         labels: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         cache_position: torch.LongTensor | None = None,
+         logits_to_keep: int | torch.Tensor = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> CausalLMOutputWithPast:
+         r"""
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, Glm4MoeLiteForCausalLM
+
+         >>> model = Glm4MoeLiteForCausalLM.from_pretrained("meta-glm4_moe_lite/Glm4MoeLite-2-7b-hf")
+         >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm4_moe_lite/Glm4MoeLite-2-7b-hf")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ __all__ = ["Glm4MoeLitePreTrainedModel", "Glm4MoeLiteModel", "Glm4MoeLiteForCausalLM"]
modeling_glm4_moe_lite_scm.py ADDED
@@ -0,0 +1,745 @@
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import math
+ from collections.abc import Callable
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import initialization as init
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernel_func_from_hub
+ from transformers.masking_utils import create_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, is_grouped_mm_available
+ from transformers.utils.generic import check_model_inputs, is_flash_attention_requested, maybe_autocast
+
+ import scattermoe
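+ # External dependency: the fused MoE kernels from scattermoe (assumed to be
+ # https://github.com/shawntan/scattermoe); it must be installed separately before using this file.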
+
+ try:
+     from .configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+ except ImportError:  # allow running as a plain script outside a package
+     from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+
+
+ class Glm4MoeLiteSCMRotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: Glm4MoeLiteSCMConfig, device=None):
+         super().__init__()
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+
+         self.rope_type = self.config.rope_parameters["rope_type"]
+         rope_init_fn: Callable = self.compute_default_rope_parameters
+         if self.rope_type != "default":
+             rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+         inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
+
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
+
+     @staticmethod
+     def compute_default_rope_parameters(
+         config: Glm4MoeLiteSCMConfig | None = None,
+         device: Optional["torch.device"] = None,
+         seq_len: int | None = None,
+     ) -> tuple["torch.Tensor", float]:
+         """
+         Computes the inverse frequencies according to the original RoPE implementation.
+         Args:
+             config ([`~transformers.PreTrainedConfig`]):
+                 The model configuration.
+             device (`torch.device`):
+                 The device to use for initialization of the inverse frequencies.
+             seq_len (`int`, *optional*):
+                 The current sequence length. Unused for this type of RoPE.
+         Returns:
+             Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
+             post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
+         """
+         base = config.rope_parameters["rope_theta"]
+         partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
+         head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+         dim = int(head_dim * partial_rotary_factor)
+
+         attention_factor = 1.0  # Unused in this type of RoPE
+
+         # Compute the inverse frequencies
+         inv_freq = 1.0 / (
+             base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
+         )
+         return inv_freq, attention_factor
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ @use_kernel_func_from_hub("rotary_pos_emb")
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: torch.Tensor | None,
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs: Unpack[TransformersKwargs],
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
+ def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     r"""
+     TODO let's just use the original freqs computation to not have the view
+     transpose + reshape! This is not optimized!
+     Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`):
+             The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+             used to pass offsetted position ids when working with a KV-cache.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+
+     b, h, s, d = q.shape
+     q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     b, h, s, d = k.shape
+     k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ def yarn_get_mscale(scale=1, mscale=1):
+     if scale <= 1:
+         return 1.0
+     return 0.1 * mscale * math.log(scale) + 1.0
+
+
+ class Glm4MoeLiteSCMAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: Glm4MoeLiteSCMConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.attention_dropout = config.attention_dropout
+         self.num_heads = config.num_attention_heads
+
+         self.q_lora_rank = config.q_lora_rank
+         self.qk_rope_head_dim = config.qk_rope_head_dim
+         self.kv_lora_rank = config.kv_lora_rank
+         self.v_head_dim = config.v_head_dim
+         self.qk_nope_head_dim = config.qk_nope_head_dim
+         self.qk_head_dim = config.qk_head_dim
+
+         self.is_causal = True
+         if self.q_lora_rank is None:
+             self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
+         else:
+             self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
+             self.q_a_layernorm = Glm4MoeLiteSCMRMSNorm(config.q_lora_rank)
+             self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
+
+         self.kv_a_proj_with_mqa = nn.Linear(
+             config.hidden_size,
+             self.kv_lora_rank + self.qk_rope_head_dim,
+             bias=config.attention_bias,
+         )
+         self.kv_a_layernorm = Glm4MoeLiteSCMRMSNorm(self.kv_lora_rank)
+         self.kv_b_proj = nn.Linear(
+             self.kv_lora_rank,
+             self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
+             bias=False,
+         )
+
+         self.o_proj = nn.Linear(
+             self.num_heads * self.v_head_dim,
+             config.hidden_size,
+             bias=config.attention_bias,
+         )
+
+         self.scaling = self.qk_head_dim ** (-0.5)
+         if self.config.rope_parameters.get("rope_type", "default") != "default":
+             mscale_all_dim = self.config.rope_parameters.get("mscale_all_dim", 0)
+             scaling_factor = self.config.rope_parameters["factor"]
+             if mscale_all_dim:
+                 mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
+                 self.scaling = self.scaling * mscale * mscale
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: torch.Tensor | None,
+         past_key_values: Cache | None = None,
+         cache_position: torch.LongTensor | None = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
+         batch_size, seq_length = hidden_states.shape[:-1]
+         query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
+         key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
+
+         if self.q_lora_rank is None:
+             q_states = self.q_proj(hidden_states)
+         else:
+             q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
+         q_states = q_states.view(query_shape).transpose(1, 2)
+         q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+
+         compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
+         k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+
+         k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2)
+         k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+
+         k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
+
+         cos, sin = position_embeddings
+         if self.config.rope_interleave:  # support using interleaved weights for efficiency
+             q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
+         else:
+             q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
+         k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
+
+         query_states = torch.cat((q_pass, q_rot), dim=-1)
+         key_states = torch.cat((k_pass, k_rot), dim=-1)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             attn_output = attn_output[:, :, :, : self.v_head_dim]
+
+         attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class Glm4MoeLiteSCMMLP(nn.Module):
+     def __init__(self, config, intermediate_size=None):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
+
+ class Glm4MoeLiteSCMTopkRouter(nn.Module):
+     def __init__(self, config: Glm4MoeLiteSCMConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.num_experts_per_tok
+         self.n_routed_experts = config.n_routed_experts
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+
+         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
+         self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
+
+     def forward(self, hidden_states):
+         hidden_states = hidden_states.view(-1, self.config.hidden_size)
+         router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
+         return router_logits
+
+
+ @use_kernel_forward_from_hub("RMSNorm")
+ class Glm4MoeLiteSCMRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         Glm4MoeLiteSCMRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ # @use_experts_implementation
+ # class _Glm4MoeLiteSCMNaiveMoe(nn.Module):
+ #     """Collection of expert weights stored as 3D tensors."""
+
+ #     def __init__(self, config):
+ #         super().__init__()
+ #         self.num_experts = config.num_local_experts
+ #         self.hidden_dim = config.hidden_size
+ #         self.intermediate_dim = config.moe_intermediate_size
+ #         self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
+ #         self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
+ #         self.act_fn = ACT2FN[config.hidden_act]
+
+ #     def forward(
+ #         self,
+ #         hidden_states: torch.Tensor,
+ #         top_k_index: torch.Tensor,
+ #         top_k_weights: torch.Tensor,
+ #     ) -> torch.Tensor:
+ #         final_hidden_states = torch.zeros_like(hidden_states)
+ #         with torch.no_grad():
+ #             expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
+ #             expert_mask = expert_mask.permute(2, 1, 0)
+ #             expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
+
+ #         for expert_idx in expert_hit:
+ #             expert_idx = expert_idx[0]
+ #             if expert_idx == self.num_experts:
+ #                 continue
+ #             top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
+ #             current_state = hidden_states[token_idx]
+ #             gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
+ #             current_hidden_states = self.act_fn(gate) * up
+ #             current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
+ #             current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
+ #             final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
+
+ #         return final_hidden_states
+
+
+ class Glm4MoeLiteSCMMoE(nn.Module):
+     """
+     A mixed expert module containing shared experts.
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.moe_mlp = scattermoe.mlp.GLUMLP(
+             input_size=self.config.hidden_size,
+             hidden_size=self.config.moe_intermediate_size,
+             num_experts=self.config.n_routed_experts,
+             top_k=self.config.num_experts_per_tok,
+             activation=ACT2FN[config.hidden_act],
+         )
+         self.gate = Glm4MoeLiteSCMTopkRouter(config)
+         self.shared_experts = Glm4MoeLiteSCMMLP(
+             config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts
+         )
+         self.n_routed_experts = config.n_routed_experts
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.top_k = config.num_experts_per_tok
+
+     def route_tokens_to_experts(self, router_logits):
+         router_logits = router_logits.sigmoid()
+         router_logits_for_choice = router_logits + self.gate.e_score_correction_bias
+         group_scores = (
+             router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .topk(2, dim=-1)[0]
+             .sum(dim=-1)
+         )
+         group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
+         group_mask = torch.zeros_like(group_scores)
+         group_mask.scatter_(1, group_idx, 1)
+         score_mask = (
+             group_mask.unsqueeze(-1)
+             .expand(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .reshape(-1, self.n_routed_experts)
+         )
+         scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
+         topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
+         topk_weights = router_logits.gather(1, topk_indices)
+         if self.norm_topk_prob:
+             denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
+             topk_weights /= denominator
+         topk_weights = topk_weights * self.routed_scaling_factor
+         return topk_indices, topk_weights
+
+     def forward(self, hidden_states):
+         residuals = hidden_states
+         orig_shape = hidden_states.shape
+         router_logits = self.gate(hidden_states)
+         topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
+         hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+
+         hidden_states = self.moe_mlp(hidden_states, topk_weights.to(torch.bfloat16), topk_indices).view(*orig_shape)
+
+         hidden_states = hidden_states + self.shared_experts(residuals)
+         return hidden_states
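+     # scattermoe path: GLUMLP fuses the per-expert gather/GEMM/scatter into one call taking the flat
+     # token matrix, routing weights, and expert indices; the bfloat16 cast on the weights matches the
+     # dtype the kernels are run in here (an assumption of this training setup).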
509
+
510
+
511
+ class Glm4MoeLiteSCMDecoderLayer(GradientCheckpointingLayer):
512
+ def __init__(self, config: Glm4MoeLiteSCMConfig, layer_idx: int):
513
+ super().__init__()
514
+ self.hidden_size = config.hidden_size
515
+ self.self_attn = Glm4MoeLiteSCMAttention(config, layer_idx)
516
+
517
+ if config.mlp_layer_types[layer_idx] == "sparse":
518
+ self.mlp = Glm4MoeLiteSCMMoE(config)
519
+ else:
520
+ self.mlp = Glm4MoeLiteSCMMLP(config)
521
+
522
+ self.input_layernorm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, config.rms_norm_eps)
523
+ self.post_attention_layernorm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, config.rms_norm_eps)
524
+
525
+ def forward(
526
+ self,
527
+ hidden_states: torch.Tensor,
528
+ attention_mask: torch.Tensor | None = None,
529
+ position_ids: torch.LongTensor | None = None,
530
+ past_key_values: Cache | None = None,
531
+ use_cache: bool | None = False,
532
+ cache_position: torch.LongTensor | None = None,
533
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
534
+ **kwargs: Unpack[TransformersKwargs],
535
+ ) -> torch.Tensor:
536
+ residual = hidden_states
537
+ hidden_states = self.input_layernorm(hidden_states)
538
+ # Self Attention
539
+ hidden_states, _ = self.self_attn(
540
+ hidden_states=hidden_states,
541
+ attention_mask=attention_mask,
542
+ position_ids=position_ids,
543
+ past_key_values=past_key_values,
544
+ use_cache=use_cache,
545
+ cache_position=cache_position,
546
+ position_embeddings=position_embeddings,
547
+ **kwargs,
548
+ )
549
+ hidden_states = residual + hidden_states
550
+
551
+ # Fully Connected
552
+ residual = hidden_states
553
+ hidden_states = self.post_attention_layernorm(hidden_states)
554
+ hidden_states = self.mlp(hidden_states)
555
+ hidden_states = residual + hidden_states
556
+ return hidden_states
557
+
558
+
559
+ @auto_docstring
560
+ class Glm4MoeLiteSCMPreTrainedModel(PreTrainedModel):
561
+ config: Glm4MoeLiteSCMConfig
562
+ base_model_prefix = "model"
563
+ supports_gradient_checkpointing = True
564
+ _no_split_modules = ["Glm4MoeLiteSCMDecoderLayer"]
565
+ _skip_keys_device_placement = ["past_key_values"]
566
+ _supports_flash_attn = True
567
+ _supports_sdpa = True
568
+ _supports_flex_attn = True
569
+ _can_compile_fullgraph = (
570
+ is_grouped_mm_available()
571
+ ) # https://huggingface.co/docs/transformers/experts_interface#torchcompile
572
+ _supports_attention_backend = True
573
+ _can_record_outputs = {
574
+ "hidden_states": Glm4MoeLiteSCMDecoderLayer,
575
+ "attentions": Glm4MoeLiteSCMAttention,
576
+ }
577
+ _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
578
+
579
+ @torch.no_grad()
580
+ def _init_weights(self, module):
581
+ super()._init_weights(module)
582
+ if isinstance(module, Glm4MoeLiteSCMTopkRouter):
583
+ init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
584
+ init.zeros_(module.e_score_correction_bias)
585
+
586
+
587
+ @auto_docstring
588
+ class Glm4MoeLiteSCMModel(Glm4MoeLiteSCMPreTrainedModel):
589
+ _keys_to_ignore_on_load_unexpected = [r"model\.layers\.47.*"]
590
+
591
+ def __init__(self, config: Glm4MoeLiteSCMConfig):
592
+ super().__init__(config)
593
+ self.padding_idx = config.pad_token_id
594
+ self.vocab_size = config.vocab_size
595
+
596
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
597
+ self.layers = nn.ModuleList(
598
+ [Glm4MoeLiteSCMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
599
+ )
600
+ self.norm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
601
+ self.rotary_emb = Glm4MoeLiteSCMRotaryEmbedding(config=config)
602
+ self.gradient_checkpointing = False
603
+
604
+ # Initialize weights and apply final processing
605
+ self.post_init()
606
+
607
+ @check_model_inputs
608
+ @auto_docstring
609
+ def forward(
610
+ self,
611
+ input_ids: torch.LongTensor | None = None,
612
+ attention_mask: torch.Tensor | None = None,
613
+ position_ids: torch.LongTensor | None = None,
614
+ past_key_values: Cache | None = None,
615
+ inputs_embeds: torch.FloatTensor | None = None,
616
+ cache_position: torch.LongTensor | None = None,
617
+ use_cache: bool | None = None,
618
+ **kwargs: Unpack[TransformersKwargs],
619
+ ) -> BaseModelOutputWithPast:
620
+ if (input_ids is None) ^ (inputs_embeds is not None):
621
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
622
+
623
+ if inputs_embeds is None:
624
+ inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
625
+
626
+ if use_cache and past_key_values is None:
627
+ past_key_values = DynamicCache(config=self.config)
628
+
629
+ if cache_position is None:
630
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
631
+ cache_position: torch.Tensor = (
632
+ torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
633
+ )
634
+
635
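+         # cache_position (computed above when not supplied) holds the absolute
+         # positions of the current chunk of tokens in the full sequence:
+         # arange(L) for a fresh prompt of length L, or the single value [N] when
+         # decoding one token with N tokens already cached; position_ids defaults
+         # to the same values, batch-broadcast.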
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = create_causal_mask(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=position_ids,
+         )
+
+         hidden_states = inputs_embeds
+         position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             hidden_states = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_embeddings=position_embeddings,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 **kwargs,
+             )
+
+         hidden_states = self.norm(hidden_states)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+
+ @auto_docstring
+ class Glm4MoeLiteSCMForCausalLM(Glm4MoeLiteSCMPreTrainedModel, GenerationMixin):
+     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = Glm4MoeLiteSCMModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         labels: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         cache_position: torch.LongTensor | None = None,
+         logits_to_keep: int | torch.Tensor = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> CausalLMOutputWithPast:
+         r"""
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, Glm4MoeLiteSCMForCausalLM
+
+         >>> model = Glm4MoeLiteSCMForCausalLM.from_pretrained("meta-glm4_moe_lite/Glm4MoeLiteSCM-2-7b-hf")
+         >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm4_moe_lite/Glm4MoeLiteSCM-2-7b-hf")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
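+         # logits_to_keep=0 keeps every position, since slice(-0, None) is
+         # slice(0, None); an int k > 0 keeps only the last k positions (during
+         # generation only the final position is needed), and a tensor selects
+         # arbitrary positions.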
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ __all__ = ["Glm4MoeLiteSCMPreTrainedModel", "Glm4MoeLiteSCMModel", "Glm4MoeLiteSCMForCausalLM"]
modeling_glm4_moe_lite_scm_liger.py ADDED
@@ -0,0 +1,724 @@
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import math
+ from collections.abc import Callable
+ from typing import Optional
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers import initialization as init
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernel_func_from_hub
+ from transformers.masking_utils import create_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, is_grouped_mm_available
+ from transformers.utils.generic import check_model_inputs, is_flash_attention_requested, maybe_autocast
+
+ import scattermoe
+
+ from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+
+ try:
+     from .configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+ except ImportError:
+     from configuration_glm4_moe_lite_scm import Glm4MoeLiteSCMConfig
+
+
+ class Glm4MoeLiteSCMRotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: Glm4MoeLiteSCMConfig, device=None):
+         super().__init__()
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+
+         self.rope_type = self.config.rope_parameters["rope_type"]
+         rope_init_fn: Callable = self.compute_default_rope_parameters
+         if self.rope_type != "default":
+             rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+         inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
+
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
+
+     @staticmethod
+     def compute_default_rope_parameters(
+         config: Glm4MoeLiteSCMConfig | None = None,
+         device: Optional["torch.device"] = None,
+         seq_len: int | None = None,
+     ) -> tuple["torch.Tensor", float]:
+         """
+         Computes the inverse frequencies according to the original RoPE implementation.
+         Args:
+             config ([`~transformers.PreTrainedConfig`]):
+                 The model configuration.
+             device (`torch.device`):
+                 The device to use for initialization of the inverse frequencies.
+             seq_len (`int`, *optional*):
+                 The current sequence length. Unused for this type of RoPE.
+         Returns:
+             Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
+             post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
+         """
+         base = config.rope_parameters["rope_theta"]
+         partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
+         head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+         dim = int(head_dim * partial_rotary_factor)
+
+         attention_factor = 1.0  # Unused in this type of RoPE
+
+         # Compute the inverse frequencies
+         inv_freq = 1.0 / (
+             base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
+         )
+         return inv_freq, attention_factor
+
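+     # Spot-check of the inv_freq formula above with illustrative numbers (not
+     # read from the config): for rope_theta=10000 and dim=64, inv_freq[i] =
+     # 10000 ** (-2 * i / 64), so the dim // 2 = 32 frequencies decay
+     # geometrically from 1.0 down to 10000 ** (-62 / 64); each entry drives one
+     # (cos, sin) channel pair in the embedding returned by forward().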
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ @use_kernel_func_from_hub("rotary_pos_emb")
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcast to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
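+ # rotate_half and apply_rotary_pos_emb above implement the standard RoPE
+ # identity with the half-split channel layout: for a channel pair (x1, x2) at
+ # position m with frequency theta,
+ #   (x1, x2) -> (x1 * cos(m * theta) - x2 * sin(m * theta),
+ #                x2 * cos(m * theta) + x1 * sin(m * theta)),
+ # i.e. a plain 2-D rotation by m * theta, where x1 and x2 sit in the first and
+ # second halves of the head dimension.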
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: torch.Tensor | None,
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs: Unpack[TransformersKwargs],
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
+ def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     r"""
+     TODO: just use the original freqs_cis computation to avoid the view,
+     transpose + reshape! This is not optimized!
+     Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`):
+             The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+             used to pass offset position ids when working with a KV-cache.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcast to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+
+     b, h, s, d = q.shape
+     q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     b, h, s, d = k.shape
+     k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
+
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ def yarn_get_mscale(scale=1, mscale=1):
+     if scale <= 1:
+         return 1.0
+     return 0.1 * mscale * math.log(scale) + 1.0
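+ # yarn_get_mscale is YaRN's attention-temperature correction: for a context
+ # extension factor `scale` > 1 it returns 0.1 * mscale * ln(scale) + 1.0, a
+ # factor that grows slowly with the extension (e.g. scale=16, mscale=1.0 gives
+ # about 1.28); for scale <= 1 no correction is applied. The attention module
+ # below folds the squared factor into its softmax scaling.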
+
+
+ class Glm4MoeLiteSCMAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: Glm4MoeLiteSCMConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.attention_dropout = config.attention_dropout
+         self.num_heads = config.num_attention_heads
+
+         self.q_lora_rank = config.q_lora_rank
+         self.qk_rope_head_dim = config.qk_rope_head_dim
+         self.kv_lora_rank = config.kv_lora_rank
+         self.v_head_dim = config.v_head_dim
+         self.qk_nope_head_dim = config.qk_nope_head_dim
+         self.qk_head_dim = config.qk_head_dim
+
+         self.is_causal = True
+         if self.q_lora_rank is None:
+             self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
+         else:
+             self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
+             self.q_a_layernorm = Glm4MoeLiteSCMRMSNorm(config.q_lora_rank)
+             self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
+
+         self.kv_a_proj_with_mqa = nn.Linear(
+             config.hidden_size,
+             self.kv_lora_rank + self.qk_rope_head_dim,
+             bias=config.attention_bias,
+         )
+         self.kv_a_layernorm = Glm4MoeLiteSCMRMSNorm(self.kv_lora_rank)
+         self.kv_b_proj = nn.Linear(
+             self.kv_lora_rank,
+             self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
+             bias=False,
+         )
+
+         self.o_proj = nn.Linear(
+             self.num_heads * self.v_head_dim,
+             config.hidden_size,
+             bias=config.attention_bias,
+         )
+
+         self.scaling = self.qk_head_dim ** (-0.5)
+         if self.config.rope_parameters.get("rope_type", "default") != "default":
+             mscale_all_dim = self.config.rope_parameters.get("mscale_all_dim", 0)
+             scaling_factor = self.config.rope_parameters["factor"]
+             if mscale_all_dim:
+                 mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
+                 self.scaling = self.scaling * mscale * mscale
+
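+     # Shape walk-through of the multi-head latent attention forward below:
+     #   q: hidden -> (q_a_proj -> RMSNorm -> q_b_proj) when q_lora_rank is set,
+     #      then split per head into a qk_nope_head_dim part and a rope part
+     #   kv: kv_a_proj_with_mqa: hidden -> [kv_lora_rank + qk_rope_head_dim];
+     #       kv_b_proj expands the kv_lora_rank latent to
+     #       num_heads * (qk_nope_head_dim + v_head_dim)
+     # Only the single qk_rope_head_dim branch carries positional information;
+     # it is shared across heads and broadcast (expand) before concatenation.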
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: torch.Tensor | None,
+         past_key_values: Cache | None = None,
+         cache_position: torch.LongTensor | None = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, torch.Tensor | None]:
+         batch_size, seq_length = hidden_states.shape[:-1]
+         query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
+         key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
+
+         if self.q_lora_rank is None:
+             q_states = self.q_proj(hidden_states)
+         else:
+             q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
+         q_states = q_states.view(query_shape).transpose(1, 2)
+         q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+
+         compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
+         k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+
+         k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2)
+         k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+
+         k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
+
+         cos, sin = position_embeddings
+         if self.config.rope_interleave:  # support using interleaved weights for efficiency
+             q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
+         else:
+             q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
+         k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
+
+         query_states = torch.cat((q_pass, q_rot), dim=-1)
+         key_states = torch.cat((k_pass, k_rot), dim=-1)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
+             attn_output = attn_output[:, :, :, : self.v_head_dim]
+
+         attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class Glm4MoeLiteSCMMLP(nn.Module):
+     def __init__(self, config, intermediate_size=None):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
+
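+ # The router below is kept in float32 on purpose: its matmul upcasts both
+ # activations and weight to fp32, and e_score_correction_bias is a float32
+ # buffer (also pinned via _keep_in_fp32_modules_strict on the pretrained model
+ # class), since expert selection is a hard top-k that is sensitive to small
+ # score perturbations in low precision.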
+ class Glm4MoeLiteSCMTopkRouter(nn.Module):
+     def __init__(self, config: Glm4MoeLiteSCMConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.num_experts_per_tok
+         self.n_routed_experts = config.n_routed_experts
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+
+         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
+         self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
+
+     def forward(self, hidden_states):
+         hidden_states = hidden_states.view(-1, self.config.hidden_size)
+         router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
+         return router_logits
+
+
+ @use_kernel_forward_from_hub("RMSNorm")
+ class Glm4MoeLiteSCMRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         Glm4MoeLiteSCMRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ class Glm4MoeLiteSCMMoE(nn.Module):
+     """
+     A mixed expert module containing shared experts.
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.moe_mlp = scattermoe.mlp.GLUMLP(
+             input_size=self.config.hidden_size,
+             hidden_size=self.config.moe_intermediate_size,
+             num_experts=self.config.n_routed_experts,
+             top_k=self.config.num_experts_per_tok,
+             activation=ACT2FN[config.hidden_act],
+         )
+         self.gate = Glm4MoeLiteSCMTopkRouter(config)
+         self.shared_experts = Glm4MoeLiteSCMMLP(
+             config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts
+         )
+         self.n_routed_experts = config.n_routed_experts
+         self.n_group = config.n_group
+         self.topk_group = config.topk_group
+         self.norm_topk_prob = config.norm_topk_prob
+         self.routed_scaling_factor = config.routed_scaling_factor
+         self.top_k = config.num_experts_per_tok
+
+     def route_tokens_to_experts(self, router_logits):
+         router_logits = router_logits.sigmoid()
+         router_logits_for_choice = router_logits + self.gate.e_score_correction_bias
+         group_scores = (
+             router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .topk(2, dim=-1)[0]
+             .sum(dim=-1)
+         )
+         group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
+         group_mask = torch.zeros_like(group_scores)
+         group_mask.scatter_(1, group_idx, 1)
+         score_mask = (
+             group_mask.unsqueeze(-1)
+             .expand(-1, self.n_group, self.n_routed_experts // self.n_group)
+             .reshape(-1, self.n_routed_experts)
+         )
+         scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
+         topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
+         topk_weights = router_logits.gather(1, topk_indices)
+         if self.norm_topk_prob:
+             denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
+             topk_weights /= denominator
+         topk_weights = topk_weights * self.routed_scaling_factor
+         return topk_indices, topk_weights
+
+     def forward(self, hidden_states):
+         residuals = hidden_states
+         orig_shape = hidden_states.shape
+         router_logits = self.gate(hidden_states)
+         topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
+         hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+
+         hidden_states = self.moe_mlp(hidden_states, topk_weights.to(torch.bfloat16), topk_indices).view(*orig_shape)
+
+         hidden_states = hidden_states + self.shared_experts(residuals)
+         return hidden_states
+
+
+ class Glm4MoeLiteSCMDecoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: Glm4MoeLiteSCMConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = Glm4MoeLiteSCMAttention(config, layer_idx)
+
+         if config.mlp_layer_types[layer_idx] == "sparse":
+             self.mlp = Glm4MoeLiteSCMMoE(config)
+         else:
+             self.mlp = Glm4MoeLiteSCMMLP(config)
+
+         self.input_layernorm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, config.rms_norm_eps)
+         self.post_attention_layernorm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         use_cache: bool | None = False,
+         cache_position: torch.LongTensor | None = None,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> torch.Tensor:
+         residual = hidden_states
+         hidden_states = self.input_layernorm(hidden_states)
+         # Self Attention
+         hidden_states, _ = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+         return hidden_states
+
+
+ @auto_docstring
+ class Glm4MoeLiteSCMPreTrainedModel(PreTrainedModel):
+     config: Glm4MoeLiteSCMConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["Glm4MoeLiteSCMDecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _can_compile_fullgraph = (
+         is_grouped_mm_available()
+     )  # https://huggingface.co/docs/transformers/experts_interface#torchcompile
+     _supports_attention_backend = True
+     _can_record_outputs = {
+         "hidden_states": Glm4MoeLiteSCMDecoderLayer,
+         "attentions": Glm4MoeLiteSCMAttention,
+     }
+     _keep_in_fp32_modules_strict = ["e_score_correction_bias"]
+
+     @torch.no_grad()
+     def _init_weights(self, module):
+         super()._init_weights(module)
+         if isinstance(module, Glm4MoeLiteSCMTopkRouter):
+             init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             init.zeros_(module.e_score_correction_bias)
+
+
+ @auto_docstring
+ class Glm4MoeLiteSCMModel(Glm4MoeLiteSCMPreTrainedModel):
+     _keys_to_ignore_on_load_unexpected = [r"model\.layers\.47.*"]
+
+     def __init__(self, config: Glm4MoeLiteSCMConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [Glm4MoeLiteSCMDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = Glm4MoeLiteSCMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Glm4MoeLiteSCMRotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @check_model_inputs
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         cache_position: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> BaseModelOutputWithPast:
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if inputs_embeds is None:
+             inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache(config=self.config)
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position: torch.Tensor = (
+                 torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = create_causal_mask(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=position_ids,
+         )
+
+         hidden_states = inputs_embeds
+         position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             hidden_states = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_embeddings=position_embeddings,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 **kwargs,
+             )
+
+         hidden_states = self.norm(hidden_states)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+
+ @auto_docstring
+ class Glm4MoeLiteSCMForCausalLM(Glm4MoeLiteSCMPreTrainedModel, GenerationMixin):
+     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = Glm4MoeLiteSCMModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: torch.LongTensor | None = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         labels: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         cache_position: torch.LongTensor | None = None,
+         logits_to_keep: int | torch.Tensor = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> CausalLMOutputWithPast:
+         r"""
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, Glm4MoeLiteSCMForCausalLM
+
+         >>> model = Glm4MoeLiteSCMForCausalLM.from_pretrained("meta-glm4_moe_lite/Glm4MoeLiteSCM-2-7b-hf")
+         >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm4_moe_lite/Glm4MoeLiteSCM-2-7b-hf")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         kept_hidden_states = hidden_states[:, slice_indices, :]
+         shift_labels = kwargs.pop("shift_labels", None)
+         logits = None
+
+         loss = None
+
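+         # Liger fast path: when training with labels (or pre-shifted labels),
+         # skip materializing the [batch, seq, vocab] logits tensor and let
+         # LigerForCausalLMLoss fuse the lm_head projection with the
+         # cross-entropy loss; that memory saving is what distinguishes this
+         # _liger variant of the modeling file. Inference still takes the plain
+         # lm_head path below, which is why `logits` stays None when the fused
+         # loss is used.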
+         skip_logits = self.training and (labels is not None or shift_labels is not None)
+         if skip_logits:
+             loss = LigerForCausalLMLoss(
+                 hidden_states=kept_hidden_states,
+                 lm_head_weight=self.lm_head.weight,
+                 labels=labels,
+                 shift_labels=shift_labels,
+                 hidden_size=self.config.hidden_size,
+                 **kwargs,
+             )
+         else:
+             logits = self.lm_head(kept_hidden_states)
+
+             if labels is not None:
+                 loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ __all__ = ["Glm4MoeLiteSCMPreTrainedModel", "Glm4MoeLiteSCMModel", "Glm4MoeLiteSCMForCausalLM"]
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19e773648cb4e65de8660ea6365e10acca112d42a854923df93db4a6f333a82d
+ size 20217442
tokenizer_config.json ADDED
@@ -0,0 +1,321 @@
+ {
+   "added_tokens_decoder": {
+     "154820": {
+       "content": "<|endoftext|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154821": {
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154822": {
+       "content": "[gMASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154823": {
+       "content": "[sMASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154824": {
+       "content": "<sop>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154825": {
+       "content": "<eop>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154826": {
+       "content": "<|system|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154827": {
+       "content": "<|user|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154828": {
+       "content": "<|assistant|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154829": {
+       "content": "<|observation|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154830": {
+       "content": "<|begin_of_image|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154831": {
+       "content": "<|end_of_image|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154832": {
+       "content": "<|begin_of_video|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154833": {
+       "content": "<|end_of_video|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154834": {
+       "content": "<|begin_of_audio|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154835": {
+       "content": "<|end_of_audio|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154836": {
+       "content": "<|begin_of_transcription|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154837": {
+       "content": "<|end_of_transcription|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "154838": {
+       "content": "<|code_prefix|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154839": {
+       "content": "<|code_middle|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154840": {
+       "content": "<|code_suffix|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154841": {
+       "content": "<think>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154842": {
+       "content": "</think>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154843": {
+       "content": "<tool_call>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154844": {
+       "content": "</tool_call>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154845": {
+       "content": "<tool_response>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154846": {
+       "content": "</tool_response>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154847": {
+       "content": "<arg_key>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154848": {
+       "content": "</arg_key>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154849": {
+       "content": "<arg_value>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154850": {
+       "content": "</arg_value>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154851": {
+       "content": "/nothink",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154852": {
+       "content": "<|begin_of_box|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154853": {
+       "content": "<|end_of_box|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154854": {
+       "content": "<|image|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "154855": {
+       "content": "<|video|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "[MASK]",
+     "[gMASK]",
+     "[sMASK]",
+     "<sop>",
+     "<eop>",
+     "<|system|>",
+     "<|user|>",
+     "<|assistant|>",
+     "<|observation|>",
+     "<|begin_of_image|>",
+     "<|end_of_image|>",
+     "<|begin_of_video|>",
+     "<|end_of_video|>",
+     "<|begin_of_audio|>",
+     "<|end_of_audio|>",
+     "<|begin_of_transcription|>",
+     "<|end_of_transcription|>"
+   ],
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 128000,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "left",
+   "remove_space": false,
+   "tokenizer_class": "PreTrainedTokenizer"
+ }