shieldstackllc committed
Commit 68ad38c · verified · 1 parent: ed28b36

Upload MLX model files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,159 @@
+{# ---------- special token variables ---------- #}
+{%- set toolcall_begin_token = '<minimax:tool_call>' -%}
+{%- set toolcall_end_token = '</minimax:tool_call>' -%}
+{#- Tool Rendering Functions ============================================== -#}
+{%- macro render_tool_namespace(namespace_name, tool_list) -%}
+{%- for tool in tool_list -%}
+<tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
+{% endfor -%}
+{%- endmacro -%}
+{%- macro visible_text(content) -%}
+{%- if content is string -%}
+{{ content }}
+{%- elif content is iterable and content is not mapping -%}
+{%- for item in content -%}
+{%- if item is mapping and item.type == 'text' -%}
+{{- item.text }}
+{%- elif item is string -%}
+{{- item }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{- content }}
+{%- endif -%}
+{%- endmacro -%}
+{#- System Message Construction ============================================ -#}
+{%- macro build_system_message(system_message) -%}
+{%- if system_message and system_message.content -%}
+{{- visible_text(system_message.content) }}
+{%- else -%}
+{%- if model_identity is not defined -%}
+{%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.5 and is built by MiniMax." -%}
+{%- endif -%}
+{{- model_identity }}
+{%- endif -%}
+
+{#- Handle current_date -#}
+{%- if system_message and system_message.current_date -%}
+{{- '\n' ~ 'Current date: ' + system_message.current_date }}
+{%- endif -%}
+{#- Handle current_location -#}
+{%- if system_message and system_message.current_location -%}
+{{- '\n' ~ 'Current location: ' + system_message.current_location }}
+{%- endif -%}
+{%- endmacro -%}
+{#- Main Template Logic ================================================= -#}
+{#- Extract system message (only first message if it's system) -#}
+{%- set system_message = none -%}
+{%- set conversation_messages = messages -%}
+{%- if messages and messages[0].role == "system" -%}
+{%- set system_message = messages[0] -%}
+{%- set conversation_messages = messages[1:] -%}
+{%- endif -%}
+{#- Get the last user message turn, for interleaved thinking -#}
+{%- set ns = namespace(last_user_index=-1) %}
+{% for m in conversation_messages %}
+{%- if m.role == 'user' %}
+{% set ns.last_user_index = loop.index0 -%}
+{%- endif %}
+{%- endfor %}
+{#- Render system message -#}
+{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+{{- build_system_message(system_message) }}
+{#- Render tools if available -#}
+{%- if tools -%}
+{{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+{{- '\n' ~ '<tools>' ~ '\n' }}
+{{- render_tool_namespace("functions", tools) }}
+{{- '</tools>' ~ '\n\n' }}
+{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+{{- '\n' ~ toolcall_begin_token }}
+<invoke name="tool-name-1">
+<parameter name="param-key-1">param-value-1</parameter>
+<parameter name="param-key-2">param-value-2</parameter>
+...
+</invoke>
+{{- '\n' ~ toolcall_end_token }}
+{%- endif -%}
+{{- '[e~[\n' }}
+
+{#- Render messages -#}
+{%- set last_tool_call = namespace(name=none) -%}
+{%- for message in conversation_messages -%}
+{%- if message.role == 'assistant' -%}
+{#- Only render reasoning_content if no user message follows -#}
+{{- ']~b]ai' ~ '\n' }}
+
+{%- set reasoning_content = '' %}
+{%- set content = visible_text(message.content) %}
+{%- if message.reasoning_content is string %}
+{%- set reasoning_content = message.reasoning_content %}
+{%- else %}
+{%- if '</think>' in content %}
+{%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
+{%- set content = content.split('</think>')[-1].strip('\n') %}
+{%- endif %}
+{%- endif %}
+{%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+{{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
+{%- endif -%}
+{%- if content -%}
+{{- content }}
+{%- endif -%}
+{%- if message.tool_calls -%}
+{{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+{%- for tool_call in message.tool_calls -%}
+{%- if tool_call.function %}
+{%- set tool_call = tool_call.function %}
+{%- endif %}
+{{- '<invoke name="' + tool_call.name + '">' }}
+{% set _args = tool_call.arguments %}
+{%- for k, v in _args.items() %}
+{{- '<parameter name="' + k + '">' }}
+{{- v | tojson(ensure_ascii=False) if v is not string else v }}
+{{- '</parameter>' }}
+{% endfor %}
+{{- '</invoke>' ~ '\n' }}
+{%- endfor -%}
+
+{{- toolcall_end_token }}
+{%- set last_tool_call.name = message.tool_calls[-1].name -%}
+{%- else -%}
+{%- set last_tool_call.name = none -%}
+{%- endif -%}
+{{- '[e~[' ~ '\n' }}
+
+{%- elif message.role == 'tool' -%}
+{%- if last_tool_call.name is none -%}
+{{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+{%- endif -%}
+{%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+{{- ']~b]tool' }}
+{%- endif -%}
+{%- if message.content is string -%}
+{{- '\n<response>' }}
+{{- message.content }}
+{{- '</response>' }}
+{%- else -%}
+{%- for tr in message.content -%}
+{{- '\n<response>' }}
+{{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+{{- '\n</response>' }}
+{%- endfor -%}
+{%- endif -%}
+{%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+{{- '[e~[\n' -}}
+{%- endif -%}
+
+{%- elif message.role == 'user' -%}
+{{- ']~b]user' ~ '\n' }}
+{{- visible_text(message.content) }}
+{{- '[e~[' ~ '\n' }}
+{%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt -#}
+{%- if add_generation_prompt -%}
+{{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
+{%- endif -%}
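
Note: a minimal usage sketch (not part of this commit) for the template above. The repo id is a placeholder and the `get_weather` tool is purely illustrative; `trust_remote_code=True` is assumed because the repo ships custom config/modeling code.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<repo-id>", trust_remote_code=True)  # placeholder id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Paris?"},
]
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool, for illustration only
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

# Tools are rendered into the <tools> block, and with add_generation_prompt=True
# the string ends with ']~b]ai\n<think>\n' per the generation-prompt branch above.
prompt = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False)
print(prompt)
```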
config.json ADDED
@@ -0,0 +1,615 @@
+{
+  "architectures": [
+    "MiniMaxM2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "attn_type_list": [
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1,
+    1
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+    "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 200020,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "max_position_embeddings": 196608,
+  "model_type": "minimax_m2",
+  "mtp_transformer_layers": 1,
+  "num_attention_heads": 48,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 62,
+  "num_key_value_heads": 8,
+  "num_local_experts": 180,
+  "num_mtp_modules": 3,
+  "output_router_logits": false,
+  "partial_rotary_factor": 0.5,
+  "qk_norm_type": "per_layer",
+  "quantization": {
+    "group_size": 64,
+    "bits": 4,
+    "mode": "affine",
+    "model.layers.0.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.1.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.2.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.3.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.4.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.5.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.6.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.7.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.8.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.9.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.10.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.11.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.12.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.13.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.14.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.15.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.16.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.17.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.18.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.19.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.20.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.21.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.22.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.23.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.24.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.25.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.26.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.27.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.28.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.29.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.30.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.31.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.32.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.33.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.34.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.35.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.36.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.37.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.38.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.39.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.40.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.41.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.42.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.43.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.44.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.45.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.46.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.47.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.48.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.49.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.50.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.51.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.52.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.53.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.54.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.55.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.56.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.57.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.58.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.59.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.60.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.61.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    }
+  },
+  "quantization_config": {
+    "group_size": 64,
+    "bits": 4,
+    "mode": "affine",
+    "model.layers.0.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.1.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.2.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.3.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.4.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.5.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.6.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.7.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.8.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.9.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.10.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.11.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.12.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.13.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.14.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.15.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.16.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.17.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.18.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.19.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.20.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.21.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.22.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.23.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.24.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.25.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.26.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.27.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.28.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.29.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.30.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.31.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.32.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.33.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.34.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.35.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.36.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.37.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.38.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.39.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.40.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.41.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.42.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.43.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.44.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.45.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.46.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.47.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.48.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.49.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.50.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.51.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.52.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.53.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.54.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.55.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.56.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.57.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.58.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.59.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.60.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    },
+    "model.layers.61.block_sparse_moe.gate": {
+      "group_size": 64,
+      "bits": 8
+    }
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 5000000,
+  "rotary_dim": 64,
+  "router_aux_loss_coef": 0.001,
+  "router_jitter_noise": 0.0,
+  "scoring_func": "sigmoid",
+  "shared_intermediate_size": 0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.55.0",
+  "use_cache": false,
+  "use_mtp": true,
+  "use_qk_norm": true,
+  "use_routing_bias": true,
+  "vocab_size": 200064
+}
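
Note: the `quantization`/`quantization_config` blocks above record MLX affine quantization: 4-bit weights with group size 64 everywhere except each layer's MoE router gate, which is kept at 8 bits (presumably because routing is precision-sensitive and the gates are tiny relative to the experts). A minimal loading sketch, assuming the `mlx-lm` package is installed and using a placeholder repo id:

```python
from mlx_lm import load, generate

model, tokenizer = load("<repo-id>")  # placeholder; point at this repo
print(generate(model, tokenizer, prompt="Hello", max_tokens=64))
```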
configuration_minimax_m2.py ADDED
@@ -0,0 +1,200 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_minimax_m2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2025 the HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class MiniMaxM2Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate a
+    MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the MiniMaxM2-7B-v0.1 or MiniMaxM2-7B-Instruct-v0.1.
+
+    [minimax_m2ai/MiniMaxM2-8x7B](https://huggingface.co/minimax_m2ai/MiniMaxM2-8x7B)
+    [minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1](https://huggingface.co/minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`MiniMaxM2Model`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
+            The attention head dimension.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
+            allows sequence of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 1000000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        num_experts_per_tok (`int`, *optional*, defaults to 2):
+            The number of experts to route per-token, can be also interpreted as the `top-k` routing
+            parameter
+        num_local_experts (`int`, *optional*, defaults to 8):
+            Number of experts per Sparse MLP layer.
+        output_router_logits (`bool`, *optional*, defaults to `False`):
+            Whether or not the router logits should be returned by the model. Enabling this will also
+            allow the model to output the auxiliary loss. See [here]() for more details
+        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+            The aux loss factor for the total loss.
+        router_jitter_noise (`float`, *optional*, defaults to 0.0):
+            Amount of noise to add to the router.
+
+    ```python
+    >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
+
+    >>> # Initializing a MiniMaxM2 7B style configuration
+    >>> configuration = MiniMaxM2Config()
+
+    >>> # Initializing a model from the MiniMaxM2 7B style configuration
+    >>> model = MiniMaxM2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "minimax_m2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    base_model_tp_plan = {
+        "layers.*.self_attn.q_proj": "colwise",
+        "layers.*.self_attn.k_proj": "colwise",
+        "layers.*.self_attn.v_proj": "colwise",
+        "layers.*.self_attn.o_proj": "rowwise",
+        "layers.*.block_sparse_moe.gate": "colwise_rep",  # we need to replicate here to correctly route experts
+        "layers.*.block_sparse_moe.experts.*.w1": "colwise",
+        "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
+        "layers.*.block_sparse_moe.experts.*.w3": "colwise",
+    }
+    base_model_pp_plan = {
+        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+        "norm": (["hidden_states"], ["hidden_states"]),
+    }
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        head_dim=None,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=1e6,
+        sliding_window=None,
+        attention_dropout=0.0,
+        num_experts_per_tok=2,
+        num_local_experts=8,
+        output_router_logits=False,
+        router_aux_loss_coef=0.001,
+        router_jitter_noise=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+        self.head_dim = head_dim
+
+        self.num_experts_per_tok = num_experts_per_tok
+        self.num_local_experts = num_local_experts
+        self.output_router_logits = output_router_logits
+        self.router_aux_loss_coef = router_aux_loss_coef
+        self.router_jitter_noise = router_jitter_noise
+
+        self.use_qk_norm = kwargs.pop("use_qk_norm", False)
+        self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
+        self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
+        if self.head_dim is not None:
+            self.partial_rotary_factor = self.rotary_dim / self.head_dim
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+
+__all__ = ["MiniMaxM2Config"]
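
Note on the `__init__` above: `partial_rotary_factor` is derived as `rotary_dim / head_dim` whenever `head_dim` is set, which is exactly how the `"partial_rotary_factor": 0.5` in config.json follows from `"rotary_dim": 64` and `"head_dim": 128`. A quick consistency check (assumes this file is importable from the working directory):

```python
from configuration_minimax_m2 import MiniMaxM2Config

cfg = MiniMaxM2Config(head_dim=128, rotary_dim=64)
assert cfg.partial_rotary_factor == 0.5  # 64 / 128
```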
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "bos_token_id": 200019,
+  "do_sample": true,
+  "eos_token_id": 200020,
+  "top_k": 40,
+  "top_p": 0.95,
+  "transformers_version": "4.55.0"
+}
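
Note: with `do_sample` enabled, a plain `generate()` call samples with top-k 40 and nucleus (top-p) 0.95 instead of decoding greedily. A sketch of the equivalent object transformers builds from this file:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    bos_token_id=200019,
    eos_token_id=200020,
    do_sample=True,  # sampling rather than greedy decoding
    top_k=40,        # keep the 40 highest-probability tokens
    top_p=0.95,      # then keep the smallest set with cumulative prob >= 0.95
)
```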
model-00001-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:459dd833457ea2d93a9c3601a942b98bb4e54e223e93d9d3928db9d841fc31d8
+size 5224834183
model-00002-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d36fa0cf3bfa77d75f95a53e7777b439476813a96e55b5dffbe31d506212d10f
+size 5356894070
model-00003-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7bd85e0f8965755e655c59bfd775410283c11fc904dab3c159f877d596bfa96
+size 5331504755
model-00004-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c53659808eb86c9fb10ba32b0f3992d6a0f2c9b34117bdfb59101826e760a7b8
+size 5356894213
model-00005-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a0cb489f97c5f53004d5d84e7860a7e3040998b820d12bdf96a7d9995acc365
+size 5356894183
model-00006-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79b6e3b81a383b91d65120bcc311b6a5fa94c65aed17bc7bedc3929f6a4f59c7
+size 5331504805
model-00007-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e6237d38289f17a9075b4ab7a19c825e6b96688d9e1ecf455dff7d5e1c4627b
+size 5356894223
model-00008-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b61829253ea62a1469dabec151f614d861e6898533a788b559389cecd2a5ab9
+size 5356894233
model-00009-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0866ef8b306a6bc95521429e96bca61f37b75c079da891175f21fd0ec632eb6a
+size 5331504813
model-00010-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09633e8fe85ab38d8f6db11033341d83d43a523fc38e6960fd272ec60dd01c8b
+size 5356894265
model-00011-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb1a8d4102424f1b9af0e5f96606d02c3e95126d0ce8c9aa1a6cd8a1ed579097
+size 5356894209
model-00012-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c54041250b57e8c634fc0368292337e653082e2b297b4e8d91702b3af020c663
+size 5331504823
model-00013-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:100674ac7e4ab11efb7472ebcc7516c01d4509f9ac0bf1634913778a564b7aca
+size 5356894217
model-00014-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0915001c7853eaafd88d3b67144e22f815931e576fe2773ef47ac432efc34e53
+size 5356894185
model-00015-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e604454ac835734d40bd1a9b199aab3bb1a209e4c17c30774ce984591a932d3
+size 5331504825
model-00016-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ee7c661c2725546c7da299b107bec18f389fa59e5d87a203b54973f2dc85902
+size 5356894143
model-00017-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2087d37e32e2f9d8d58d721226bf564b63bf42d9d70473573dcd9b397c62b924
+size 5331524046
model-00018-of-00018.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2238829788caa4a52592e6051f437e886b0318785d1d7a7b2c3d6f838af547d
+size 345710894
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_minimax_m2.py ADDED
@@ -0,0 +1,706 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from collections.abc import Callable
24
+ from typing import Optional, Union, Unpack
25
+
26
+ import torch
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.generation import GenerationMixin
32
+ from transformers.integrations import use_kernel_forward_from_hub
33
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
34
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
35
+ from transformers.modeling_layers import (
36
+ GenericForQuestionAnswering,
37
+ GenericForSequenceClassification,
38
+ GenericForTokenClassification,
39
+ GradientCheckpointingLayer,
40
+ )
41
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
42
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
43
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
44
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
45
+ from transformers.utils.deprecation import deprecate_kwarg
46
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
47
+ from .configuration_minimax_m2 import MiniMaxM2Config
48
+
49
+
50
+ class MiniMaxM2MLP(nn.Module):
51
+ def __init__(self, config: MiniMaxM2Config):
52
+ super().__init__()
53
+ self.ffn_dim = config.intermediate_size
54
+ self.hidden_dim = config.hidden_size
55
+
56
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
57
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
58
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
59
+
60
+ self.act_fn = ACT2FN[config.hidden_act]
61
+
62
+ def forward(self, hidden_states):
63
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
64
+ current_hidden_states = self.w2(current_hidden_states)
65
+ return current_hidden_states
66
+
67
+
68
+ class MiniMaxM2Experts(nn.ModuleList):
69
+ """
70
+ ModuleList of experts.
71
+ """
72
+
73
+ def __init__(self, config: MiniMaxM2Config):
74
+ super().__init__()
75
+ self.top_k = config.num_experts_per_tok
76
+ self.num_experts = config.num_local_experts
77
+ for _ in range(self.num_experts):
78
+ self.append(MiniMaxM2MLP(config))
79
+
80
+ def forward(
81
+ self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
82
+ ) -> torch.Tensor:
83
+ """
84
+ Args:
85
+ hidden_states: (batch_size * sequence_length, hidden_dim)
86
+ selected_experts: (batch_size * sequence_length, top_k)
87
+ routing_weights: (batch_size * sequence_length, top_k)
88
+ Returns:
89
+ (batch_size * sequence_length, hidden_dim)
90
+ """
91
+ final_hidden_states = torch.zeros_like(hidden_states)
92
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
93
+
94
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
95
+ for expert_idx in expert_hit:
96
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
97
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
98
+ current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
99
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
100
+ return final_hidden_states
101
+
102
+
103
+ class MiniMaxM2SparseMoeBlock(nn.Module):
104
+ def __init__(self, config):
105
+ super().__init__()
106
+ self.top_k = config.num_experts_per_tok
107
+ self.jitter_noise = config.router_jitter_noise
108
+ self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
109
+ self.experts = MiniMaxM2Experts(config)
110
+ self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
111
+
112
+ def route_tokens_to_experts(self, router_logits):
113
+ routing_weights = torch.nn.functional.sigmoid(router_logits.float())
114
+ scores_for_choice = routing_weights + self.e_score_correction_bias
115
+ _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
116
+ top_k_weights = routing_weights.gather(1, top_k_index)
117
+ top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
118
+ return top_k_index, top_k_weights.to(router_logits.dtype)
119
+
120
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
121
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
122
+ if self.training and self.jitter_noise > 0:
123
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
124
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
125
+ router_logits = self.gate(hidden_states)
126
+ top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
127
+ hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
128
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
129
+ return hidden_states, router_logits
130
+
131
+
132
+ @use_kernel_forward_from_hub("RMSNorm")
133
+ class MiniMaxM2RMSNorm(nn.Module):
134
+ def __init__(self, hidden_size, eps=1e-6):
135
+ """
136
+ MiniMaxM2RMSNorm is equivalent to T5LayerNorm
137
+ """
138
+ super().__init__()
139
+ self.weight = nn.Parameter(torch.ones(hidden_size))
140
+ self.variance_epsilon = eps
141
+
142
+ def forward(self, hidden_states):
143
+ input_dtype = hidden_states.dtype
144
+ hidden_states = hidden_states.to(torch.float32)
145
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
146
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
147
+ return self.weight * hidden_states.to(input_dtype)
148
+
149
+ def extra_repr(self):
150
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
151
+
152
+
153
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
154
+ """
155
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
156
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
157
+ """
158
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
159
+ if n_rep == 1:
160
+ return hidden_states
161
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
162
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
163
+
164
+
165
+ def eager_attention_forward(
166
+ module: nn.Module,
167
+ query: torch.Tensor,
168
+ key: torch.Tensor,
169
+ value: torch.Tensor,
170
+ attention_mask: Optional[torch.Tensor],
171
+ scaling: float,
172
+ dropout: float = 0.0,
173
+ **kwargs: Unpack[TransformersKwargs],
174
+ ):
175
+ key_states = repeat_kv(key, module.num_key_value_groups)
176
+ value_states = repeat_kv(value, module.num_key_value_groups)
177
+
178
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
179
+ if attention_mask is not None:
180
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
181
+ attn_weights = attn_weights + causal_mask
182
+
183
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
184
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
185
+ attn_output = torch.matmul(attn_weights, value_states)
186
+ attn_output = attn_output.transpose(1, 2).contiguous()
187
+
188
+ return attn_output, attn_weights
189
+
190
+
191
+ def rotate_half(x):
192
+ """Rotates half the hidden dims of the input."""
193
+ x1 = x[..., : x.shape[-1] // 2]
194
+ x2 = x[..., x.shape[-1] // 2 :]
195
+ return torch.cat((-x2, x1), dim=-1)
196
+
197
+
198
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
199
+ """Applies Rotary Position Embedding to the query and key tensors.
200
+
201
+ Args:
202
+ q (`torch.Tensor`): The query tensor.
203
+ k (`torch.Tensor`): The key tensor.
204
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
205
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
206
+ position_ids (`torch.Tensor`, *optional*):
207
+ Deprecated and unused.
208
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
209
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
210
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
211
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
212
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
213
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
214
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
215
+ Returns:
216
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
217
+ """
218
+ cos = cos.unsqueeze(unsqueeze_dim)
219
+ sin = sin.unsqueeze(unsqueeze_dim)
220
+
221
+ # Keep half or full tensor for later concatenation
222
+ rotary_dim = cos.shape[-1]
223
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
224
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
225
+
226
+ # Apply rotary embeddings on the first half or full tensor
227
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
228
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
229
+
230
+ # Concatenate back to full shape
231
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
232
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
233
+ return q_embed, k_embed
234
+
235
+
236
+ class MiniMaxM2Attention(nn.Module):
237
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
238
+
239
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
240
+ super().__init__()
241
+ self.config = config
242
+ self.layer_idx = layer_idx
243
+ self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
244
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
245
+ self.scaling = self.head_dim**-0.5
246
+ self.attention_dropout = config.attention_dropout
247
+ self.is_causal = True
248
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
249
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
250
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
251
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
252
+
253
+ self.use_qk_norm = config.use_qk_norm
254
+ if self.use_qk_norm:
255
+ self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
256
+ self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
257
+
258
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
259
+ def forward(
260
+ self,
261
+ hidden_states: torch.Tensor,
262
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
263
+ attention_mask: Optional[torch.Tensor],
264
+ past_key_values: Optional[Cache] = None,
265
+ cache_position: Optional[torch.LongTensor] = None,
266
+ **kwargs: Unpack[FlashAttentionKwargs],
267
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
268
+ input_shape = hidden_states.shape[:-1]
269
+ hidden_shape = (*input_shape, -1, self.head_dim)
270
+
271
+ query_states = self.q_proj(hidden_states)
272
+ key_states = self.k_proj(hidden_states)
273
+ value_states = self.v_proj(hidden_states)
274
+
275
+ if self.use_qk_norm: # main diff from Llama
276
+ query_states = self.q_norm(query_states)
277
+ key_states = self.k_norm(key_states)
278
+
279
+ key_states = key_states.view(hidden_shape)
280
+ query_states = query_states.view(hidden_shape)
281
+ value_states = value_states.view(hidden_shape)
282
+
283
+ query_states = query_states.transpose(1, 2)
284
+ key_states = key_states.transpose(1, 2)
285
+ value_states = value_states.transpose(1, 2)
286
+
287
+ cos, sin = position_embeddings
288
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
289
+
290
+ if past_key_values is not None:
291
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
292
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
293
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
294
+
295
+ attention_interface: Callable = eager_attention_forward
296
+ if self.config._attn_implementation != "eager":
297
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
298
+
299
+ attn_output, attn_weights = attention_interface(
300
+ self,
301
+ query_states,
302
+ key_states,
303
+ value_states,
304
+ attention_mask,
305
+ dropout=0.0 if not self.training else self.attention_dropout,
306
+ scaling=self.scaling,
307
+ **kwargs,
308
+ )
309
+
310
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
311
+ attn_output = self.o_proj(attn_output)
312
+ return attn_output, attn_weights
313
+
314
+
315
+ class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
316
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
317
+ super().__init__()
318
+ self.hidden_size = config.hidden_size
319
+
320
+ self.self_attn = MiniMaxM2Attention(config, layer_idx)
321
+
322
+ self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
323
+ self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
324
+ self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
325
+
326
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
327
+ def forward(
328
+ self,
329
+ hidden_states: torch.Tensor,
330
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
331
+ attention_mask: Optional[torch.Tensor] = None,
332
+ position_ids: Optional[torch.LongTensor] = None,
333
+ past_key_values: Optional[Cache] = None,
334
+ cache_position: Optional[torch.LongTensor] = None,
335
+ **kwargs: Unpack[TransformersKwargs],
336
+ ) -> torch.FloatTensor:
337
+ residual = hidden_states
338
+
339
+ hidden_states = self.input_layernorm(hidden_states)
340
+
341
+ # Self Attention
342
+ hidden_states, _ = self.self_attn(
343
+ hidden_states=hidden_states,
344
+ position_embeddings=position_embeddings,
345
+ attention_mask=attention_mask,
346
+ position_ids=position_ids,
347
+ past_key_values=past_key_values,
348
+ cache_position=cache_position,
349
+ **kwargs,
350
+ )
351
+ hidden_states = residual + hidden_states
352
+
353
+ # Fully Connected
354
+ residual = hidden_states
355
+ hidden_states = self.post_attention_layernorm(hidden_states)
356
+ hidden_states, _ = self.block_sparse_moe(hidden_states)
357
+ hidden_states = residual + hidden_states
358
+
359
+ return hidden_states
360
+
361
+
362
+ class MiniMaxM2RotaryEmbedding(nn.Module):
363
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
364
+
365
+ def __init__(self, config: MiniMaxM2Config, device=None):
366
+ super().__init__()
367
+ # BC: "rope_type" was originally "type"
368
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
369
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
370
+ else:
371
+ self.rope_type = "default"
372
+ self.max_seq_len_cached = config.max_position_embeddings
373
+ self.original_max_seq_len = config.max_position_embeddings
374
+
375
+ self.config = config
376
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
377
+
378
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
379
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
380
+ self.original_inv_freq = self.inv_freq
381
+
382
+ @torch.no_grad()
383
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
384
+ def forward(self, x, position_ids):
385
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
386
+ position_ids_expanded = position_ids[:, None, :].float()
387
+
388
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
389
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
390
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
391
+ emb = torch.cat((freqs, freqs), dim=-1)
392
+ cos = emb.cos() * self.attention_scaling
393
+ sin = emb.sin() * self.attention_scaling
394
+
395
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
396
+
397
+
398
+@auto_docstring
+class MiniMaxM2PreTrainedModel(PreTrainedModel):
+    config: MiniMaxM2Config
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["MiniMaxM2DecoderLayer"]
+    _skip_keys_device_placement = ["past_key_values"]
+    _supports_flash_attn = True
+    _supports_sdpa = True
+    _supports_flex_attn = True
+    _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
+    _supports_attention_backend = True
+    _can_record_outputs = {
+        "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
+        "hidden_states": MiniMaxM2DecoderLayer,
+        "attentions": MiniMaxM2Attention,
+    }
+
+
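The `_supports_flash_attn` / `_supports_sdpa` / `_supports_flex_attn` flags above are what let callers pick an attention backend at load time; note also `_can_compile_fullgraph = False`, so full-graph `torch.compile` is off for this MoE. A usage sketch, assuming a hypothetical checkpoint id:

```python
# Selecting an attention backend for this model family; the repo id is a placeholder.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-minimax-m2-checkpoint",  # hypothetical checkpoint id
    torch_dtype=torch.bfloat16,
    attn_implementation="sdpa",             # also valid here: "eager", "flash_attention_2", "flex_attention"
)
```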
+@auto_docstring
+class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
+    def __init__(self, config: MiniMaxM2Config):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+        self.layers = nn.ModuleList(
+            [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+        )
+        self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
+        self.gradient_checkpointing = False
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @check_model_inputs
+    @auto_docstring
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs: Unpack[TransformersKwargs],
+    ) -> MoeModelOutputWithPast:
+        if (input_ids is None) ^ (inputs_embeds is not None):
+            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+        if use_cache and past_key_values is None:
+            past_key_values = DynamicCache(config=self.config)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        if cache_position is None:
+            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+            cache_position = torch.arange(
+                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+            )
+        if position_ids is None:
+            position_ids = cache_position.unsqueeze(0)
+
+        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
+        causal_mask = mask_function(
+            config=self.config,
+            input_embeds=inputs_embeds,
+            attention_mask=attention_mask,
+            cache_position=cache_position,
+            past_key_values=past_key_values,
+            position_ids=position_ids,
+        )
+
+        hidden_states = inputs_embeds
+
+        # create position embeddings to be shared across the decoder layers
+        position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+            hidden_states = decoder_layer(
+                hidden_states,
+                position_embeddings=position_embeddings,
+                attention_mask=causal_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                use_cache=use_cache,
+                cache_position=cache_position,
+                **kwargs,
+            )
+
+        hidden_states = self.norm(hidden_states)
+
+        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
+            last_hidden_state=hidden_states,
+            past_key_values=past_key_values,
+        )
+
+
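When `cache_position` and `position_ids` are not supplied, the forward above derives them from how many tokens the KV cache has already seen. This toy sketch reproduces that arithmetic for a 6-token prefill followed by a one-token decode step; no model weights are involved:

```python
# Mirror of the cache_position / position_ids defaulting logic in MiniMaxM2Model.forward.
import torch


def default_positions(past_seen_tokens: int, new_tokens: int):
    cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
    position_ids = cache_position.unsqueeze(0)  # add the batch dimension
    return cache_position, position_ids


# Prefill: nothing cached yet, 6 fresh tokens -> cache_position = [0, 1, 2, 3, 4, 5]
print(default_positions(0, 6))
# Decode: 6 tokens cached, 1 new token -> cache_position = [6]
print(default_positions(6, 1))
```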
+def load_balancing_loss_func(
+    gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
+    num_experts: Optional[int] = None,
+    top_k=2,
+    attention_mask: Optional[torch.Tensor] = None,
+) -> Union[torch.Tensor, int]:
+    r"""
+    Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
+
+    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
+    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+    experts is too unbalanced.
+
+    Args:
+        gate_logits:
+            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+            shape [batch_size X sequence_length, num_experts].
+        num_experts:
+            Number of experts.
+        top_k:
+            The number of experts to route per token; can also be interpreted as the `top-k` routing
+            parameter.
+        attention_mask (`torch.Tensor`, *optional*):
+            The attention_mask used in the forward function, of
+            shape [batch_size X sequence_length] if not None.
+
+    Returns:
+        The auxiliary loss.
+    """
+    if gate_logits is None or not isinstance(gate_logits, tuple):
+        return 0
+
+    if isinstance(gate_logits, tuple):
+        compute_device = gate_logits[0].device
+        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+    if attention_mask is None:
+        # Compute the percentage of tokens routed to each expert
+        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+        # Compute the average probability of routing to these experts
+        router_prob_per_expert = torch.mean(routing_weights, dim=0)
+    else:
+        batch_size, sequence_length = attention_mask.shape
+        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+        # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
+        expert_attention_mask = (
+            attention_mask[None, :, :, None, None]
+            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+            .reshape(-1, top_k, num_experts)
+            .to(compute_device)
+        )
+
+        # Compute the percentage of tokens routed to each expert
+        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+            expert_attention_mask, dim=0
+        )
+
+        # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
+        router_per_expert_attention_mask = (
+            attention_mask[None, :, :, None]
+            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+            .reshape(-1, num_experts)
+            .to(compute_device)
+        )
+
+        # Compute the average probability of routing to these experts
+        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+            router_per_expert_attention_mask, dim=0
+        )
+
+    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+    return overall_loss * num_experts
+
+
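A toy walk-through of the unmasked branch makes the scale of this loss concrete: with perfectly uniform routing it bottoms out at `top_k`, and it grows as routing becomes unbalanced. The numbers below are arbitrary illustration, not from any real router:

```python
# Re-tracing the attention_mask=None branch of load_balancing_loss_func on toy data:
# two layers, three tokens each, four experts, top-2 routing.
import torch
import torch.nn.functional as F

num_experts, top_k = 4, 2
gate_logits = (torch.randn(3, num_experts), torch.randn(3, num_experts))  # one tensor per layer

concatenated = torch.cat(gate_logits, dim=0)              # [6, num_experts]
routing_weights = F.softmax(concatenated, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = F.one_hot(selected_experts, num_experts)    # [6, top_k, num_experts]

tokens_per_expert = expert_mask.float().mean(dim=0)       # fraction of tokens hitting each (slot, expert)
router_prob_per_expert = routing_weights.mean(dim=0)      # average router probability per expert

aux_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) * num_experts
print(aux_loss)  # equals top_k (here 2.0) when routing is perfectly uniform; larger when unbalanced
```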
+@auto_docstring
+class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
+    _tied_weights_keys = ["lm_head.weight"]
+    _tp_plan = {"lm_head": "colwise_rep"}
+    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = MiniMaxM2Model(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        self.router_aux_loss_coef = config.router_aux_loss_coef
+        self.num_experts = config.num_local_experts
+        self.num_experts_per_tok = config.num_experts_per_tok
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @can_return_tuple
+    @auto_docstring
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_router_logits: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        logits_to_keep: Union[int, torch.Tensor] = 0,
+        **kwargs: Unpack[TransformersKwargs],
+    ) -> MoeCausalLMOutputWithPast:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
+
+        >>> model = MiniMaxM2ForCausalLM.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
+        >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/MiniMaxM2-8x7B-v0.1")
+
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
+        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+        >>> # Generate
+        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+        ```"""
+
+        output_router_logits = (
+            output_router_logits if output_router_logits is not None else self.config.output_router_logits
+        )
+
+        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+        outputs: MoeModelOutputWithPast = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_router_logits=output_router_logits,
+            cache_position=cache_position,
+            **kwargs,
+        )
+
+        hidden_states = outputs.last_hidden_state
+        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+        logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+        loss = None
+        if labels is not None:
+            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+        aux_loss = None
+        if output_router_logits:
+            aux_loss = load_balancing_loss_func(
+                outputs.router_logits,
+                self.num_experts,
+                self.num_experts_per_tok,
+                attention_mask,
+            )
+            if labels is not None:
+                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure aux_loss resides on the same device as loss
+
+        return MoeCausalLMOutputWithPast(
+            loss=loss,
+            aux_loss=aux_loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            router_logits=outputs.router_logits,
+        )
+
+
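Two details of the forward above are worth seeing in isolation: the `logits_to_keep` slice means generation only pays for the last position's vocabulary projection, and the router aux loss is folded into the LM loss via `router_aux_loss_coef`. A standalone sketch of the slicing, with toy sizes:

```python
# Illustration of the logits_to_keep slicing in MiniMaxM2ForCausalLM.forward:
# with logits_to_keep=1 (the usual decode-step value), only the last position hits lm_head.
import torch

batch, seq, hidden, vocab = 2, 10, 16, 50
hidden_states = torch.randn(batch, seq, hidden)
lm_head = torch.nn.Linear(hidden, vocab, bias=False)

logits_to_keep = 1
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = lm_head(hidden_states[:, slice_indices, :])
print(logits.shape)  # torch.Size([2, 1, 50]) instead of torch.Size([2, 10, 50])
```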
+class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
+    pass
+
+
+class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
+    pass
+
+
+class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
+    pass
+
+
+__all__ = [
+    "MiniMaxM2ForCausalLM",
+    "MiniMaxM2ForQuestionAnswering",
+    "MiniMaxM2Model",
+    "MiniMaxM2PreTrainedModel",
+    "MiniMaxM2ForSequenceClassification",
+    "MiniMaxM2ForTokenClassification",
+]
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b81e5e5cba2b169e86a0771825a927e9d41b4c4484ded4a286410f41f702f17
+size 15523144
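These three lines are a Git LFS pointer, not the tokenizer itself; the real ~15.5 MB `tokenizer.json` is fetched when the LFS object resolves. A quick sketch for checking a downloaded copy against the pointer, assuming `tokenizer.json` sits in the current directory:

```python
# Verify a locally downloaded tokenizer.json against the LFS pointer above.
import hashlib
from pathlib import Path

expected_oid = "7b81e5e5cba2b169e86a0771825a927e9d41b4c4484ded4a286410f41f702f17"
data = Path("tokenizer.json").read_bytes()
print(len(data))                                          # should be 15523144
print(hashlib.sha256(data).hexdigest() == expected_oid)   # True if the LFS object resolved correctly
```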
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "add_prefix_space": false,
+  "backend": "tokenizers",
+  "bos_token": "]~!b[",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "[e~[",
+  "is_local": true,
+  "model_max_length": 40960000,
+  "tokenizer_class": "TokenizersBackend",
+  "tool_parser_type": "minimax_m2",
+  "unk_token": "]!d~["
+}
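With this config in place, the tokenizer loads through the standard Auto API and exposes the special tokens declared above; the repo id below is a placeholder for wherever these files are hosted:

```python
# Loading the tokenizer described by this config; the repo id is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-minimax-m2-checkpoint")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # ]~!b[ [e~[ ]!d~[
print(tok.model_max_length)                         # 40960000
```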