xinhe committed
Commit e1dec48 · verified · 1 parent: 6b79418

Upload folder using huggingface_hub

chat_template.jinja ADDED
@@ -0,0 +1,80 @@
+ {% macro render_content(content) %}{% if content is none %}{{- '' }}{% elif content is string %}{{- content }}{% elif content is mapping %}{{- content['value'] if 'value' in content else content['text'] }}{% elif content is iterable %}{% for item in content %}{% if item.type == 'text' %}{{- item['value'] if 'value' in item else item['text'] }}{% elif item.type == 'image' %}<im_patch>{% endif %}{% endfor %}{% endif %}{% endmacro %}
+ {{bos_token}}{%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- render_content(messages[0].content) + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou have access to the following functions in JSONSchema format:\n\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson(ensure_ascii=False) }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...>\n...\n</function> block must be nested within <tool_call>\n...\n</tool_call> XML tags\n- Required parameters MUST be specified\n</IMPORTANT><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + render_content(messages[0].content) + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and render_content(message.content) is string and not(render_content(message.content).startswith('<tool_response>') and render_content(message.content).endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- set content = render_content(message.content) %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {%- set role_name = 'observation' if (message.role == "system" and not loop.first and message.name == 'observation') else message.role %}
+ {{- '<|im_start|>' + role_name + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = render_content(message.reasoning_content) %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- else %}
+ {%- set reasoning_content = '' %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content + '\n</think>\n' + content }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n<function=' + tool_call.name + '>\n' }}
+ {%- if tool_call.arguments is defined %}
+ {%- set arguments = tool_call.arguments %}
+ {%- for args_name, args_value in arguments|items %}
+ {{- '<parameter=' + args_name + '>\n' }}
+ {%- set args_value = args_value | tojson(ensure_ascii=False) | safe if args_value is mapping or (args_value is sequence and args_value is not string) else args_value | string %}
+ {{- args_value }}
+ {{- '\n</parameter>\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '</function>\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>tool_response\n' }}
+ {%- endif %}
+ {{- '<tool_response>' }}
+ {{- content }}
+ {{- '</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n<think>\n' }}
+ {%- endif %}
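
A quick orientation note: the template above wraps each turn in <|im_start|>/<|im_end|> markers, emits tool schemas inside a <tools> block, and opens a trailing assistant turn with <think> when add_generation_prompt is set. Below is a minimal sketch of rendering it with transformers; the repo path, tool schema, and messages are illustrative placeholders, and it assumes the repo's tokenizer files are present alongside this template.

from transformers import AutoTokenizer

path = "."  # a local clone of this repo; substitute the real repo id or path
tok = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

tools = [{
    "name": "get_weather",  # hypothetical function, for illustration only
    "description": "Look up the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Paris?"},
]
# Renders the system turn, the <tools> block, the user turn, and the trailing
# '<|im_start|>assistant\n<think>\n' generation prompt defined by the template.
prompt = tok.apply_chat_template(messages, tools=tools,
                                 add_generation_prompt=True, tokenize=False)
print(prompt)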
config.json ADDED
@@ -0,0 +1,1335 @@
+ {
+   "architectures": [
+     "Step3p5ForCausalLM"
+   ],
+   "att_impl_type": "GQA",
+   "attention_other_setting": {
+     "attention_type": "sliding_attention",
+     "head_dim": 128,
+     "num_attention_groups": 8,
+     "num_attention_heads": 96,
+     "true_head_dim": 128
+   },
+   "auto_map": {
+     "AutoConfig": "configuration_step3p5.Step3p5Config",
+     "AutoModelForCausalLM": "modeling_step3p5.Step3p5ForCausalLM"
+   },
+   "bos_token_id": 0,
+   "dtype": "bfloat16",
+   "eos_token_id": [
+     1,
+     2,
+     128007
+   ],
+   "head_dim": 128,
+   "hidden_size": 4096,
+   "intermediate_size": 11264,
+   "layer_types": [
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention"
+   ],
+   "max_position_embeddings": 262144,
+   "max_seq_len": 262144,
+   "model_type": "step3p5",
+   "moe_every_n_layer": 1,
+   "moe_intermediate_size": 1280,
+   "moe_layer_offset": 0,
+   "moe_layers_enum": "3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44",
+   "moe_num_experts": 288,
+   "moe_router_activation": "sigmoid",
+   "moe_router_scaling_factor": 3.0,
+   "moe_top_k": 8,
+   "need_fp32_gate": true,
+   "norm_expert_weight": true,
+   "num_attention_groups": 8,
+   "num_attention_heads": 64,
+   "num_hidden_layers": 45,
+   "num_nextn_predict_layers": 3,
+   "partial_rotary_factor": 0.5,
+   "partial_rotary_factors": [
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0,
+     0.5,
+     1.0,
+     1.0,
+     1.0
+   ],
+   "quantization_config": {
+     "autoround_version": "0.12.0",
+     "bits": 4,
+     "data_type": "int",
+     "extra_config": {
+       ".*eh_proj.*": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       ".*g_proj.*": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       ".*layers\\.45.*": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       ".*layers\\.46.*": {
+         "bits": 8
+       },
+       ".*layers\\.47.*": {
+         "bits": 8
+       },
+       ".*mlp.*": {
+         "bits": 8
+       },
+       ".*moe\\.gate.*": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       ".*self_attn.*": {
+         "bits": 8
+       },
+       ".*shared_head.*": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.0.mlp.down_proj": {
+         "bits": 8
+       },
+       "model.layers.0.mlp.gate_proj": {
+         "bits": 8
+       },
+       "model.layers.0.mlp.up_proj": {
+         "bits": 8
+       },
+       "model.layers.0.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.0.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.0.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.0.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.0.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.1.mlp.down_proj": {
+         "bits": 8
+       },
+       "model.layers.1.mlp.gate_proj": {
+         "bits": 8
+       },
+       "model.layers.1.mlp.up_proj": {
+         "bits": 8
+       },
+       "model.layers.1.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.1.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.1.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.1.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.1.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.10.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.10.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.10.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.10.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.10.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.10.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.11.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.11.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.11.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.11.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.11.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.11.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.12.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.12.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.12.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.12.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.12.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.12.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.13.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.13.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.13.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.13.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.13.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.13.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.14.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.14.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.14.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.14.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.14.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.14.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.15.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.15.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.15.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.15.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.15.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.15.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.16.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.16.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.16.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.16.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.16.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.16.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.17.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.17.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.17.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.17.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.17.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.17.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.18.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.18.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.18.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.18.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.18.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.18.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.19.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.19.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.19.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.19.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.19.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.19.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.2.mlp.down_proj": {
+         "bits": 8
+       },
+       "model.layers.2.mlp.gate_proj": {
+         "bits": 8
+       },
+       "model.layers.2.mlp.up_proj": {
+         "bits": 8
+       },
+       "model.layers.2.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.2.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.2.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.2.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.2.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.20.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.20.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.20.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.20.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.20.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.20.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.21.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.21.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.21.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.21.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.21.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.21.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.22.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.22.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.22.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.22.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.22.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.22.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.23.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.23.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.23.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.23.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.23.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.23.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.24.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.24.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.24.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.24.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.24.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.24.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.25.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.25.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.25.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.25.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.25.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.25.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.26.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.26.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.26.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.26.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.26.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.26.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.27.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.27.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.27.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.27.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.27.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.27.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.28.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.28.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.28.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.28.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.28.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.28.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.29.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.29.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.29.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.29.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.29.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.29.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.3.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.3.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.3.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.3.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.3.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.3.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.30.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.30.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.30.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.30.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.30.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.30.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.31.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.31.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.31.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.31.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.31.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.31.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.32.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.32.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.32.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.32.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.32.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.32.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.33.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.33.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.33.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.33.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.33.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.33.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.34.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.34.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.34.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.34.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.34.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.34.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.35.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.35.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.35.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.35.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.35.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.35.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.36.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.36.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.36.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.36.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.36.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.36.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.37.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.37.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.37.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.37.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.37.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.37.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.38.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.38.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.38.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.38.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.38.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.38.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.39.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.39.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.39.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.39.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.39.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.39.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.4.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.4.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.4.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.4.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.4.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.4.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.40.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.40.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.40.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.40.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.40.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.40.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.41.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.41.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.41.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.41.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.41.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.41.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.42.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.42.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.42.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.42.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.42.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.42.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.43.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.43.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.43.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.43.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.43.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.43.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.44.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.44.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.44.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.44.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.44.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.44.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.5.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.5.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.5.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.5.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.5.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.5.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.6.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.6.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.6.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.6.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.6.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.6.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.7.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.7.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.7.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.7.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.7.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.7.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.8.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.8.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.8.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.8.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.8.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.8.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.9.moe.gate": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.9.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "float"
+       },
+       "model.layers.9.self_attn.k_proj": {
+         "bits": 8
+       },
+       "model.layers.9.self_attn.o_proj": {
+         "bits": 8
+       },
+       "model.layers.9.self_attn.q_proj": {
+         "bits": 8
+       },
+       "model.layers.9.self_attn.v_proj": {
+         "bits": 8
+       },
+       "model.layers.45.eh_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.mlp.down_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.mlp.gate_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.mlp.up_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.self_attn.g_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.self_attn.k_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.self_attn.o_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.self_attn.q_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.self_attn.v_proj": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.45.transformer.shared_head.output": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.46.transformer.shared_head.output": {
+         "bits": 16,
+         "data_type": "fp"
+       },
+       "model.layers.47.transformer.shared_head.output": {
+         "bits": 16,
+         "data_type": "fp"
+       }
+     },
+     "group_size": 128,
+     "iters": 0,
+     "packing_format": "auto_round:auto_gptq",
+     "quant_method": "auto-round",
+     "sym": true,
+     "block_name_to_quantize": [
+       "model.layers"
+     ]
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_parameters": {
+     "factor": 2.0,
+     "high_freq_factor": 32.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 131072,
+     "rope_type": "llama3"
+   },
+   "rope_scaling": {
+     "factor": 2.0,
+     "high_freq_factor": 32.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 131072,
+     "rope_type": "llama3"
+   },
+   "rope_theta": [
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0,
+     5000000.0,
+     10000.0,
+     10000.0,
+     10000.0
+   ],
+   "share_expert_dim": 1280,
+   "sink": false,
+   "sliding_window": 512,
+   "swiglu_limits": [
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     7,
+     7,
+     0.0,
+     0.0,
+     0.0
+   ],
+   "swiglu_limits_shared": [
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     0.0,
+     16,
+     0.0,
+     0.0,
+     0.0
+   ],
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.6",
+   "use_head_wise_attn_gate": true,
+   "use_moe": true,
+   "use_moe_router_bias": true,
+   "use_qk_norm": true,
+   "use_rope_layers": [],
+   "vocab_size": 128896,
+   "yarn_only_types": [
+     "full_attention"
+   ],
+   "zero_centered": true
+ }
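
The quantization_config above encodes a mixed-precision AutoRound recipe: 4-bit symmetric int weights by default (group_size 128, GPTQ-style packing), with extra_config overrides that keep attention and dense-MLP projections at 8 bits and keep router gates, attention gates (g_proj), and the multi-token-prediction layers 45-47 in 16-bit float. A minimal sketch of inspecting the loaded config; the local path is illustrative:

from transformers import AutoConfig

path = "."  # a local clone of this repo; substitute the real repo id or path
cfg = AutoConfig.from_pretrained(path, trust_remote_code=True)
print(cfg.model_type, cfg.num_hidden_layers, cfg.moe_num_experts)  # step3p5 45 288

q = cfg.quantization_config  # a plain dict mirroring the JSON above
print(q["bits"], q["group_size"], q["quant_method"])  # 4 128 auto-round
# Per-module overrides, e.g. router gates kept in 16-bit float:
print(q["extra_config"][r".*moe\.gate.*"])  # {'bits': 16, 'data_type': 'float'}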
configuration_step3p5.py ADDED
@@ -0,0 +1,59 @@
+ from typing import Any, Optional, Union
+
+ from transformers.configuration_utils import PretrainedConfig
+
+
+
+ class Step3p5Config(PretrainedConfig):
+     model_type = "step3p5"
+     architectures = ["Step3p5ForCausalLM"]
+
+     def __init__(
+         self,
+         hidden_size: int = 4096,
+         intermediate_size: int = 11264,
+         num_attention_heads: int = 64,
+         num_attention_groups: int = 8,
+         num_hidden_layers: int = 45,
+         max_seq_len: int = 128000,
+         vocab_size: int = 128815,
+         rms_norm_eps: float = 1e-5,
+         moe_intermediate_size: int = 1280,
+         moe_num_experts: int = 288,
+         moe_top_k: int = 8,
+         rope_theta: float = 10000,
+         rope_scaling: Optional[dict[str, Any]] = None,
+         max_position_embeddings: int = 128000,
+         share_expert_dims: int = 1280,
+         head_dim: int = 128,
+         norm_expert_weight: bool = True,
+         layer_types: list[str] = None,
+         sliding_window: Optional[int] = None,
+         moe_layers_enum: tuple[int] = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+                                        15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+                                        25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+                                        35, 36, 37, 38, 39, 40, 41, 42, 43, 44),
+         **kwargs,
+     ) -> None:
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_attention_heads = num_attention_heads
+         self.num_attention_groups = num_attention_groups
+         self.num_hidden_layers = num_hidden_layers
+         self.max_seq_len = max_seq_len
+         self.vocab_size = vocab_size
+         self.rms_norm_eps = rms_norm_eps
+         self.moe_intermediate_size = moe_intermediate_size
+         self.moe_num_experts = moe_num_experts
+         self.moe_top_k = moe_top_k
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.max_position_embeddings = max_position_embeddings
+         self.share_expert_dim = share_expert_dims
+         self.head_dim = head_dim
+         self.norm_expert_weight = norm_expert_weight
+         self.moe_layers_enum = moe_layers_enum
+         self.layer_types = layer_types
+         self.sliding_window = sliding_window
+         super().__init__(**kwargs)
+
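
A minimal usage sketch for the config class, assuming the file above sits on the Python path; the overrides are illustrative, and note that config.json supersedes several of these defaults (e.g. vocab_size and max_seq_len):

from configuration_step3p5 import Step3p5Config

config = Step3p5Config(max_seq_len=262144, max_position_embeddings=262144)
print(config.hidden_size)          # 4096
print(config.moe_layers_enum[:3])  # (3, 4, 5)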
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     2,
+     128007
+   ],
+   "transformers_version": "4.57.6"
+ }
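
Sampling is enabled by default (do_sample) with the three stop tokens above. A minimal, hypothetical generation sketch; the local path is illustrative, and it assumes enough memory for the quantized weights plus a backend that supports the AutoRound/GPTQ packing:

from transformers import AutoModelForCausalLM, AutoTokenizer

path = "."  # a local clone of this repo; substitute the real repo id or path
tok = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True,
                                             device_map="auto")

# apply_chat_template renders chat_template.jinja and returns input ids.
inputs = tok.apply_chat_template([{"role": "user", "content": "Hello!"}],
                                 add_generation_prompt=True,
                                 return_tensors="pt").to(model.device)
# do_sample and eos_token_id are picked up from generation_config.json.
out = model.generate(inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))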
model-00001-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6761e05735387572c860b970b71b20479452d860cc9cff3c464fed6d7e3b0a3
+ size 5368438976
model-00002-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d919ddd9fe51359aec2a85a0d737d6aab95fdb53e40dbb99b0c00d53daafd951
+ size 5367756960
model-00003-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecc665f3f26d7289669815ea3f12654a72d9e5403775b96bcf3298f2db7de0ea
+ size 5368211176
model-00004-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13459c621f4409f6f143e1b574cd7d6afcee0bd0b06dc6d1b73638cb97d21397
+ size 5367403280
model-00005-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a0a8068dfefb60311b53baa82d8a15d2046ff565c23c0ffcb493e70473ee166
+ size 5368217472
model-00006-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a4f14d7b4edf8595d3c873dd36659a9b363521d9e25df95862a88e270c641ec
+ size 5367405736
model-00007-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f19fa080692ba69c733196074a0cd39ecce4c2a454727477a1aa58eb01c3bdaf
+ size 5368217472
model-00008-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4bd4cda7b12265f50748a5f926bde08ab0c8ea12ca8881b57f1116e89f81509
+ size 5368574056
model-00009-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05230197766ec36cf303cfe0264e6dd89f5f9ddd0acd3826bba5e0bf119be52b
+ size 5367405280
model-00010-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:255e4a296f3293a45ceb428c6118cd6d2bc61d79fc7a44b8a27a4683d5a8947c
+ size 5368217216
model-00011-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:848656c526345b669e2b1cbda54dd9f6a590fe4befc2ef051176efcd97344cd2
+ size 5367405744
model-00012-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc225788d7437fd09ef857f72f1c6290ad78a21c8af6f4b8843b8d055c0b46fc
+ size 5368217472
model-00013-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:043f1636056467feb87d440b40cb1c86dd1ccc75a28246f5a2f73f6bc9e33ab8
+ size 5367405736
model-00014-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d877f818ccc366539baf2dc45f0c6a9d00bcd39321e29104b6ee1a66495182d
+ size 5368573928
model-00015-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64ad7c1d9663a77e1d02ab3dd69900d988b04fb236eff3711305b8553aca8723
+ size 5367405280
model-00016-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21b32b19397cbe7cc145e8dd890d24ad960aa3e26e68e5019640f811307b0f9a
+ size 5368217344
model-00017-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70d223a1f5c6363e8ec9a6f14e8859de959accb7f2fa617e9dea458d31f05ae6
+ size 5367405744
model-00018-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b0a28352230fde481023a0dfd49dddb5b88f7e6f90fa6de571f9e835deb4ebf
+ size 5368217472
model-00019-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cb1238f34c02c87293f0d68b8b57177655e7b5327660a6bdca2ad2f7d1a228e
+ size 5344969632
model-00020-of-00020.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04299ea56f67b315b3840263b5ab5eb52d22a7f7e41728af8bcc05db5069a861
+ size 4510972416
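
Each shard entry above is a Git LFS pointer (version line, sha256 oid, byte size), not the weights themselves. A small sketch of totalling the advertised sizes from a pointer-only clone (one made with GIT_LFS_SKIP_SMUDGE=1; in a fully materialized clone these files are the multi-gigabyte weights and this parse would fail):

from pathlib import Path

total = 0
for p in sorted(Path(".").glob("model-*-of-00020.safetensors")):
    # Each pointer file contains lines such as "size 5368438976".
    fields = dict(line.split(" ", 1) for line in p.read_text().splitlines())
    total += int(fields["size"])
print(f"{total / 1e9:.1f} GB across 20 shards")  # roughly 106 GB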
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
model_extra_tensors.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e6afe4bd107e02e1206923942d755d1f8678d395bce6a60e8d4f660aea638ba
+ size 4306719864
modeling_step3p5.py ADDED
@@ -0,0 +1,900 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2025 The LLAMA4 and HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from dataclasses import dataclass
16
+ from typing import Callable, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ from transformers.activations import ACT2FN
22
+ from transformers.cache_utils import Cache, DynamicCache
23
+ from transformers.generation import GenerationMixin
24
+ from transformers.masking_utils import (create_causal_mask,
25
+ create_sliding_window_causal_mask)
26
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
27
+ from transformers.modeling_layers import GradientCheckpointingLayer
28
+ from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
29
+ from transformers.modeling_rope_utils import (ROPE_INIT_FUNCTIONS,
30
+ dynamic_rope_update)
31
+ from transformers.modeling_utils import (ALL_ATTENTION_FUNCTIONS,
32
+ PreTrainedModel)
33
+ from transformers.processing_utils import Unpack
34
+ from transformers.utils import TransformersKwargs, can_return_tuple, logging
35
+
36
+ from .configuration_step3p5 import Step3p5Config
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ __all__ = ["Step3p5Model", "Step3p5ForCausalLM"]
41
+
42
+ class Step3p5RotaryEmbedding(nn.Module):
43
+
44
+ def __init__(self, config: Step3p5Config, device=None, layer_idx=None):
45
+ super().__init__()
46
+ # BC: "rope_type" was originally "type"
47
+ self.layer_idx = layer_idx
48
+ if config.rope_parameters is not None:
49
+ self.rope_type = config.rope_parameters.get(
50
+ "rope_type", config.rope_parameters.get("type"))
51
+ else:
52
+ self.rope_type = "default"
53
+ self.max_seq_len_cached = config.max_position_embeddings
54
+ self.original_max_seq_len = config.max_position_embeddings
55
+
56
+ partial_rotary_factors = getattr(config, "partial_rotary_factors",
57
+ None)
58
+ if partial_rotary_factors is not None:
59
+ config.partial_rotary_factor = partial_rotary_factors[
60
+ self.layer_idx]
61
+ else:
62
+ config.partial_rotary_factor = 1.0
63
+
64
+ self.rope_theta = config.rope_theta
65
+ if isinstance(config.rope_theta, list):
66
+ self.rope_theta = config.rope_theta.copy()
67
+ config.rope_theta = self.rope_theta[self.layer_idx]
68
+
69
+ self.config = config
70
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
71
+ inv_freq, self.attention_scaling = self.rope_init_fn(
72
+ self.config, device)
73
+
74
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
75
+ self.original_inv_freq = self.inv_freq
76
+ config.rope_theta = self.rope_theta
77
+
78
+ @torch.no_grad()
79
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
80
+ def forward(self, x, position_ids):
81
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(
82
+ position_ids.shape[0], -1, 1).to(x.device)
83
+ position_ids_expanded = position_ids[:, None, :].float().to(x.device)
84
+
85
+ device_type = x.device.type if isinstance(
86
+ x.device.type, str) and x.device.type != "mps" else "cpu"
87
+ with torch.autocast(device_type=device_type,
88
+ enabled=False): # Force float32
89
+ freqs = (inv_freq_expanded.float()
90
+ @ position_ids_expanded.float()).transpose(1, 2)
91
+ emb = torch.cat((freqs, freqs), dim=-1)
92
+ cos = emb.cos() * self.attention_scaling
93
+ sin = emb.sin() * self.attention_scaling
94
+
95
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
96
+
97
+
98
+ def rotate_half(x):
99
+ """Rotates half the hidden dims of the input."""
100
+ x1 = x[..., :x.shape[-1] // 2]
101
+ x2 = x[..., x.shape[-1] // 2:]
102
+ return torch.cat((-x2, x1), dim=-1)
103
+
104
+
105
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
106
+ """Applies Rotary Position Embedding to the query and key tensors.
107
+
108
+ Args:
109
+ q (`torch.Tensor`): The query tensor.
110
+ k (`torch.Tensor`): The key tensor.
111
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
112
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
113
+ position_ids (`torch.Tensor`, *optional*):
114
+ Deprecated and unused.
115
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
116
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
117
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
118
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
119
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
120
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
121
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
122
+ Returns:
123
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
124
+ """
125
+ rotary_dim = cos.shape[-1]
126
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
127
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
128
+
129
+ # Apply rotary embeddings on the first half or full tensor
130
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
131
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
132
+
133
+ # Concatenate back to full shape
134
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
135
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
136
+ return q_embed, k_embed
137
+
138
+
139
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
140
+ """
141
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
142
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
143
+ """
144
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
145
+ if n_rep == 1:
146
+ return hidden_states
147
+ hidden_states = hidden_states[:, :,
148
+ None, :, :].expand(batch,
149
+ num_key_value_heads,
150
+ n_rep, slen, head_dim)
151
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen,
152
+ head_dim)
153
+
154
+
155
+ # Adapted from transformers.models.llama.modeling_llama.eager_attention_forward -> llama4 doesn't cast attn weights to fp32
156
+ def eager_attention_forward(
157
+ module: nn.Module,
158
+ query: torch.Tensor,
159
+ key: torch.Tensor,
160
+ value: torch.Tensor,
161
+ attention_mask: Optional[torch.Tensor],
162
+ scaling: float,
163
+ dropout: float = 0.0,
164
+ **kwargs,
165
+ ):
166
+ key_states = repeat_kv(key, module.num_key_value_groups)
167
+ value_states = repeat_kv(value, module.num_key_value_groups)
168
+ # breakpoint()
169
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
170
+ if attention_mask is not None:
171
+ causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
172
+ attn_weights = attn_weights + causal_mask
173
+
174
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
175
+ attn_weights = nn.functional.dropout(attn_weights,
176
+ p=dropout,
177
+ training=module.training)
178
+ attn_output = torch.matmul(attn_weights, value_states)
179
+ attn_output = attn_output.transpose(1, 2).contiguous()
180
+
181
+ return attn_output, attn_weights
182
+
183
+ @dataclass
184
+ class Step3p5CausalLMOutputWithPast(ModelOutput):
185
+ r"""
186
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
187
+ Language modeling loss (for next-token prediction).
188
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
189
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
190
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
191
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
192
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
193
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
194
+ `past_key_values` input) to speed up sequential decoding.
195
+ """
196
+
197
+ loss: Optional[torch.FloatTensor] = None
198
+ last_hidden_state: Optional[torch.FloatTensor] = None
199
+ logits: torch.FloatTensor = None
200
+ past_key_values: Optional[list[torch.FloatTensor]] = None
201
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
202
+ attentions: Optional[tuple[torch.FloatTensor]] = None
203
+
204
+
205
+ class Step3p5MLP(nn.Module):
206
+
207
+ def __init__(self, config, intermediate_size=None, swiglu_limit=None):
208
+ super().__init__()
209
+ self.config = config
210
+ self.hidden_size = config.hidden_size
211
+ self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
212
+ self.gate_proj = nn.Linear(self.hidden_size,
213
+ self.intermediate_size,
214
+ bias=False)
215
+ self.up_proj = nn.Linear(self.hidden_size,
216
+ self.intermediate_size,
217
+ bias=False)
218
+ self.down_proj = nn.Linear(self.intermediate_size,
219
+ self.hidden_size,
220
+ bias=False)
221
+ self.act_fn = ACT2FN["silu"]
222
+ self.limit = swiglu_limit
223
+
224
+ def forward(self, x):
225
+ up = self.up_proj(x)
226
+ gate = self.act_fn(self.gate_proj(x))
227
+ if self.limit is not None:
228
+ gate = gate.clamp(min=None, max=self.limit)
229
+ up = up.clamp(min=-self.limit, max=self.limit)
230
+
231
+ return self.down_proj(gate * up)
232
+
233
+
234
+ def sigmoid_routing_function(gating_output: torch.Tensor, topk: int,
235
+ renormalize: bool):
236
+ gating_output = gating_output.float()
237
+ gate_prob = torch.sigmoid(gating_output)
238
+ gate_prob = gate_prob / gate_prob.sum(dim=-1, keepdim=True)
239
+ topk_prob, indices = torch.topk(gate_prob, k=topk, dim=1)
240
+ expert_topk_weight = topk_prob
241
+ if renormalize:
242
+ expert_topk_weight = expert_topk_weight / torch.sum(
243
+ expert_topk_weight, dim=-1, keepdim=True)
244
+ return expert_topk_weight, indices
245
+
246
+
247
+ def softmax_routing_function(gating_output: torch.Tensor, top_k: int,
248
+ renormalize: bool):
249
+ gating_output = gating_output.float()
250
+ gate_prob = torch.softmax(gating_output, dim=-1)
251
+ gate_prob = gate_prob / gate_prob.sum(dim=-1, keepdim=True)
252
+ topk_prob, indices = torch.topk(gate_prob, k=top_k, dim=1)
253
+ expert_topk_weight = topk_prob
254
+ if renormalize:
255
+ expert_topk_weight = expert_topk_weight / torch.sum(
256
+ expert_topk_weight, dim=-1, keepdim=True)
257
+ return expert_topk_weight, indices.to(torch.int32)
258
+
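+ # Note: softmax already sums to 1 along dim=-1, so the extra normalization in
+ # softmax_routing_function is numerically a no-op kept for symmetry with the
+ # sigmoid variant; only the top-k renormalization actually changes the weights.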
259
+
260
+ class MoELinear(nn.Module):
261
+
262
+ def __init__(self, num_experts, in_features, out_features):
263
+ super().__init__()
264
+ self.num_experts = num_experts
265
+ self.in_features = in_features
266
+ self.out_features = out_features
267
+ self.weight = nn.Parameter(
268
+ torch.empty(num_experts, out_features, in_features))
269
+
270
+ def forward(self, x, expert_id):
271
+ x = F.linear(x.float(), self.weight[expert_id].float())
272
+ return x
273
+
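+ # MoELinear keeps every expert's weight in one (num_experts, out, in) tensor
+ # and applies a single expert's slice with F.linear. The fp32 casts trade
+ # speed for per-expert matmul precision; a fused grouped-GEMM kernel would be
+ # the usual production replacement for this reference implementation.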
274
+
275
+ class Step3p5MoEMLP(nn.Module):
276
+
277
+ def __init__(self, config, swiglu_limit=None):
278
+ super().__init__()
279
+ self.num_experts = config.moe_num_experts
280
+ self.top_k = config.moe_top_k
281
+ self.hidden_size = config.hidden_size
282
+ self.moe_intermediate_size = config.moe_intermediate_size
283
+
284
+ self.use_moe_router_bias = config.use_moe_router_bias
285
+ if self.use_moe_router_bias:
286
+ self.router_bias = nn.Parameter(torch.zeros(config.moe_num_experts,
287
+ dtype=torch.float32),
288
+ requires_grad=False)
289
+ self.custom_routing_function = self.router_bias_func
290
+ elif config.moe_router_activation == "sigmoid":
291
+ self.custom_routing_function = sigmoid_routing_function
292
+ else:
293
+ self.custom_routing_function = None
294
+ self.need_fp32_gate = config.need_fp32_gate
295
+ self.routed_scaling_factor = getattr(config,
296
+ "moe_router_scaling_factor", 1.0)
297
+
298
+ # gating
299
+ self.gate = nn.Linear(self.hidden_size, self.num_experts, bias=False)
300
+
301
+ self.act_fn = ACT2FN["silu"]
302
+ self.limit = swiglu_limit
303
+
304
+ self.up_proj = MoELinear(self.num_experts, self.hidden_size,
305
+ self.moe_intermediate_size)
306
+ self.gate_proj = MoELinear(self.num_experts, self.hidden_size,
307
+ self.moe_intermediate_size)
308
+ self.down_proj = MoELinear(self.num_experts,
309
+ self.moe_intermediate_size,
310
+ self.hidden_size)
311
+
312
+ def router_bias_func(self, gating_output: torch.Tensor, topk: int,
313
+ renormalize: bool):
314
+ gate_prob = torch.sigmoid(gating_output.float())
315
+ gate_prob_with_bias = gate_prob + self.router_bias.unsqueeze(0)
316
+ _, indices = torch.topk(gate_prob_with_bias, k=topk, dim=1)
317
+ topk_prob = torch.gather(gate_prob, 1, indices)
318
+ expert_topk_weight = topk_prob
319
+ if renormalize:
320
+ expert_topk_weight = expert_topk_weight / (
321
+ torch.sum(expert_topk_weight, dim=-1, keepdim=True) + 1e-20)
322
+ return expert_topk_weight, indices
323
+
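+ # router_bias_func adds a non-trainable per-expert bias to the sigmoid scores
+ # *only* for top-k selection; the returned weights are gathered from the
+ # unbiased probabilities. This is the auxiliary-loss-free load-balancing
+ # trick: the bias steers traffic toward underused experts without distorting
+ # the mixture weights themselves.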
324
+ def get_expert_output(self, inputs: torch.Tensor, expert_id):
326
+ up = self.up_proj(inputs, expert_id)
327
+ gate = self.act_fn(self.gate_proj(inputs, expert_id))
328
+ if self.limit is not None:
329
+ gate = gate.clamp(min=None, max=self.limit)
330
+ up = up.clamp(min=-self.limit, max=self.limit)
331
+
332
+ return self.down_proj(gate * up, expert_id)
333
+
334
+ def forward(self, hidden_states):
335
+ """ """
336
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
337
+ hidden_states = hidden_states.view(-1, hidden_dim)
338
+ if self.need_fp32_gate:
339
+ router_logits = torch.matmul(hidden_states.to(torch.float32), self.gate.weight.t().to(torch.float32))
340
+ else:
341
+ # router_logits: (batch * sequence_length, n_experts)
342
+ router_logits = self.gate(hidden_states)
343
+
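+ # need_fp32_gate forces the router matmul into fp32 even when the trunk runs
+ # in bf16: top-k routing is sensitive to tiny logit differences, so the gate
+ # is kept in full precision while everything downstream stays low-precision.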
344
+ if self.custom_routing_function:
345
+ routing_weights, selected_experts = self.custom_routing_function(
346
+ router_logits, self.top_k, renormalize=True)
347
+ else:
348
+ routing_weights = F.softmax(router_logits,
349
+ dim=1,
350
+ dtype=torch.float)
351
+ routing_weights, selected_experts = torch.topk(routing_weights,
352
+ self.top_k,
353
+ dim=-1)
354
+
355
+ routing_weights = routing_weights * self.routed_scaling_factor
356
+
357
+ final_hidden_states = torch.zeros(
358
+ (batch_size * sequence_length, hidden_dim),
359
+ dtype=hidden_states.dtype,
360
+ device=hidden_states.device)
361
+
362
+ # One hot encode the selected experts to create an expert mask
363
+ # this will be used to easily index which expert is going to be sollicitated
364
+ expert_mask = torch.nn.functional.one_hot(
365
+ selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
366
+
367
+ # Loop over all available experts in the model and perform the computation on each expert
368
+ for expert_idx in range(self.num_experts):
369
+ idx, top_x = torch.where(expert_mask[expert_idx])
370
+
371
+ # Index the correct hidden states and compute the expert hidden state for
372
+ # the current expert. We need to make sure to multiply the output hidden
373
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
374
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
375
+ current_hidden_states = (
376
+ self.get_expert_output(current_state, expert_idx) *
377
+ routing_weights[top_x, idx, None])
378
+
379
+ # However `index_add_` only support torch tensors for indexing so we'll use
380
+ # the `top_x` tensor here.
381
+ final_hidden_states.index_add_(
382
+ 0, top_x, current_hidden_states.to(hidden_states.dtype))
383
+ final_hidden_states = final_hidden_states.reshape(
384
+ batch_size, sequence_length, hidden_dim)
385
+ return final_hidden_states
386
+
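+ # Dispatch summary for the loop above: one_hot(selected_experts) permuted to
+ # (num_experts, top_k, tokens) lets torch.where(expert_mask[e]) recover, for
+ # expert e, the top-k slot (`idx`) and token index (`top_x`) of every token
+ # routed to it; index_add_ then scatters the weighted expert outputs back in
+ # place. With assumed sizes (tokens=3, experts=4, top_k=2), expert_mask is
+ # (4, 2, 3) and each token lands in exactly two experts' buckets.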
387
+
388
+ class Step3p5RMSNorm(nn.Module):
389
+
390
+ def __init__(
391
+ self,
392
+ hidden_size: int,
393
+ eps: float = 1e-5,
394
+ ) -> None:
395
+ super().__init__()
396
+ self.weight = nn.Parameter(torch.ones(hidden_size))
397
+ self.variance_epsilon = eps
398
+
399
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
400
+ dtype = x.dtype
401
+ x = x.float()
402
+ variance = x.pow(2).mean(dim=-1, keepdim=True)
403
+ normed = x * torch.rsqrt(variance + self.variance_epsilon)
404
+ normed = normed * (self.weight.float() + 1)
405
+ return normed.to(dtype)
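+ # Note the `(weight + 1)` scale above: the stored gain is an offset from 1
+ # (Gemma-style), so checkpoints effectively save `gamma - 1` and a zero value
+ # means an identity-scale RMSNorm. Variance is computed in fp32 and the
+ # result is cast back to the input dtype.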
+
+
+ class Step3p5Attention(nn.Module):
407
+
408
+ def __init__(self, config: Step3p5Config, layer_idx):
409
+ super().__init__()
410
+ self.config = config
411
+ self.layer_idx = layer_idx
412
+ self.num_attention_heads = config.num_attention_heads
413
+ self.num_key_value_heads = config.num_attention_groups
414
+
415
+ layer_types = getattr(config, "layer_types", [])
416
+ if layer_types:
417
+ enable_sliding_window = layer_types[
418
+ self.layer_idx] == "sliding_attention"
419
+ else:
420
+ enable_sliding_window = self.layer_idx % 2 == 0
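+ # The fallback above assumes the sliding/full pattern alternates, with
+ # even-indexed layers using the sliding window; `config.layer_types`, when
+ # present, tags each layer explicitly instead.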
421
+
422
+ if hasattr(config, "yarn_only_types") and layer_types[
423
+ self.layer_idx] not in config.yarn_only_types:
424
+ config.rope_parameters = None
425
+ else:
426
+ config.rope_parameters = getattr(config, "rope_scaling", None)
427
+
428
+ self.sliding_window = config.sliding_window
429
+ if enable_sliding_window:
430
+ self.num_attention_heads = config.attention_other_setting[
431
+ "num_attention_heads"]
432
+ self.num_key_value_heads = config.attention_other_setting[
433
+ "num_attention_groups"]
434
+
435
+ # Only sliding-window layers keep a window size; full-attention layers get None.
+ if not enable_sliding_window:
+ self.sliding_window = None
439
+ self.head_dim = getattr(config, "head_dim",
440
+ config.hidden_size // self.num_attention_heads)
441
+ self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
442
+
443
+ self.rotary_emb = Step3p5RotaryEmbedding(config, layer_idx=layer_idx)
444
+
445
+ self.q_size = self.num_attention_heads * self.head_dim
446
+ self.kv_size = self.num_key_value_heads * self.head_dim
447
+ self.scaling = self.head_dim**-0.5
+ # `attention_dropout` is read in forward(); default to 0.0 when the config
+ # does not define it (it was previously referenced without being set).
+ self.attention_dropout = getattr(config, "attention_dropout", 0.0)
448
+
449
+ self.q_proj = nn.Linear(config.hidden_size, self.q_size, bias=False)
450
+ self.k_proj = nn.Linear(config.hidden_size, self.kv_size, bias=False)
451
+ self.v_proj = nn.Linear(config.hidden_size, self.kv_size, bias=False)
452
+ self.o_proj = nn.Linear(self.q_size, config.hidden_size, bias=False)
453
+ self.q_norm = Step3p5RMSNorm(self.head_dim,
454
+ eps=config.rms_norm_eps)
455
+ self.k_norm = Step3p5RMSNorm(self.head_dim,
456
+ eps=config.rms_norm_eps)
457
+
458
+ self.use_head_wise_attn_gate = config.use_head_wise_attn_gate
459
+ if self.use_head_wise_attn_gate:
460
+ self.g_proj = nn.Linear(config.hidden_size,
461
+ self.num_attention_heads,
462
+ bias=False)
463
+
464
+ self.use_rope = True
465
+ use_rope_layers = getattr(config, "use_rope_layers", None)
466
+ if use_rope_layers:
467
+ self.use_rope = use_rope_layers[self.layer_idx]
468
+
469
+ def forward(
470
+ self,
471
+ hidden_states: torch.Tensor,
472
+ attention_mask: Optional[torch.Tensor],
473
+ past_key_value: Optional[Cache] = None,
474
+ cache_position: Optional[torch.LongTensor] = None,
475
+ position_ids: Optional[torch.LongTensor] = None,
476
+ **kwargs: Unpack[FlashAttentionKwargs],
477
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
478
+ Optional[Tuple[torch.Tensor]]]:
479
+ input_shape = hidden_states.shape[:-1]
480
+ hidden_shape = (*input_shape, -1, self.head_dim)
481
+
482
+ query_states = self.q_norm(
483
+ self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
484
+ key_states = self.k_norm(
485
+ self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
486
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(
487
+ 1, 2)
488
+ if self.use_head_wise_attn_gate:
489
+ gate_states = self.g_proj(hidden_states)
490
+ cos, sin = self.rotary_emb(hidden_states, position_ids)
+
+ # Layers listed in `use_rope_layers` apply rotary embeddings; the rest are
+ # NoPE layers. `self.use_rope` was computed in __init__ but never checked
+ # here, so the guard below restores that intent.
+ if self.use_rope:
+ query_states, key_states = apply_rotary_pos_emb(
+ query_states, key_states, cos, sin)
+
497
+ if past_key_value is not None:
498
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
499
+ cache_kwargs = {
500
+ "sin": sin,
501
+ "cos": cos,
502
+ "cache_position": cache_position
503
+ }
504
+ key_states, value_states = past_key_value.update(
505
+ key_states, value_states, self.layer_idx, cache_kwargs)
506
+
507
+ attention_interface: Callable = eager_attention_forward
508
+ # TODO: considering FP8;
509
+ # RuntimeError: Expected attn_mask dtype to be bool or float or to match query dtype,
510
+ # but got attn_mask.dtype: long int and query.dtype: c10::BFloat16 instead.
511
+ if self.config._attn_implementation != "eager":
512
+ attention_interface = ALL_ATTENTION_FUNCTIONS[
513
+ self.config._attn_implementation]
514
+
515
+ attn_output, attn_weights = attention_interface(
516
+ self,
517
+ query_states,
518
+ key_states,
519
+ value_states,
520
+ attention_mask,
521
+ dropout=0.0 if not self.training else self.attention_dropout,
522
+ scaling=self.scaling,
523
+ sliding_window=self.sliding_window, # main diff with Llama
524
+ **kwargs,
525
+ )
526
+ attn_output = attn_output.reshape(*input_shape, -1)
527
+ if self.use_head_wise_attn_gate:
528
+ output = attn_output.view(
529
+ *attn_output.shape[:-1], self.num_attention_heads,
530
+ self.head_dim) * gate_states.unsqueeze(-1).sigmoid()
531
+ attn_output = output.view(*attn_output.shape)
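+ # Head-wise output gating: g_proj emits one scalar per head per token and its
+ # sigmoid rescales that head's output before o_proj, letting the model softly
+ # switch individual heads off on a per-token basis.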
532
+ attn_output = self.o_proj(attn_output)
533
+
534
+ return attn_output, attn_weights
535
+
536
+
537
+ class Step3p5DecoderLayer(GradientCheckpointingLayer):
538
+
539
+ def __init__(self, config, layer_idx):
540
+ super().__init__()
541
+ self.hidden_size = config.hidden_size
542
+ self.layer_idx = layer_idx
543
+ self.self_attn = Step3p5Attention(config, layer_idx)
544
+ self.attention_type = config.layer_types[layer_idx]
545
+
546
+ moe_layers_enum = getattr(config, "moe_layers_enum", None)
547
+ if moe_layers_enum is not None:
548
+ moe_layers_idx = [
549
+ int(i) for i in moe_layers_enum.strip().split(',')
550
+ ]
551
+ else:
552
+ moe_layers_idx = [i for i in range(1, config.num_hidden_layers)]
553
+ self.is_moe_layer = layer_idx in moe_layers_idx
554
+ self.use_moe = False
555
+
556
+ if config.swiglu_limits_shared and config.swiglu_limits_shared[
557
+ layer_idx] is not None and config.swiglu_limits_shared[
558
+ layer_idx] != 0:
559
+ swiglu_limit_shared = config.swiglu_limits_shared[layer_idx]
560
+ else:
561
+ swiglu_limit_shared = None
562
+ if config.swiglu_limits and config.swiglu_limits[
563
+ layer_idx] is not None and config.swiglu_limits[layer_idx] != 0:
564
+ swiglu_limit = config.swiglu_limits[layer_idx]
565
+ else:
566
+ swiglu_limit = None
567
+ if self.is_moe_layer:
568
+ self.moe = Step3p5MoEMLP(config, swiglu_limit=swiglu_limit)
569
+ self.share_expert = Step3p5MLP(
570
+ config,
571
+ intermediate_size=config.share_expert_dim,
572
+ swiglu_limit=swiglu_limit_shared)
573
+ self.use_moe = True
574
+ else:
575
+ self.mlp = Step3p5MLP(config,
576
+ intermediate_size=config.intermediate_size,
577
+ swiglu_limit=swiglu_limit_shared)
578
+
579
+ self.input_layernorm = Step3p5RMSNorm(
580
+ config.hidden_size,
581
+ eps=config.rms_norm_eps)
582
+ self.post_attention_layernorm = Step3p5RMSNorm(
583
+ config.hidden_size,
584
+ eps=config.rms_norm_eps)
585
+
586
+ def forward(
587
+ self,
588
+ hidden_states: torch.Tensor,
589
+ attention_mask: Optional[torch.Tensor] = None,
590
+ position_ids: Optional[torch.LongTensor] = None,
591
+ past_key_value: Optional[tuple[torch.Tensor]] = None,
592
+ cache_position: Optional[torch.LongTensor] = None,
593
+ **kwargs: Unpack[FlashAttentionKwargs],
594
+ ) -> torch.FloatTensor:
595
+ residual = hidden_states
596
+ hidden_states = self.input_layernorm(hidden_states)
597
+ hidden_states, _ = self.self_attn(
598
+ hidden_states=hidden_states,
599
+ attention_mask=attention_mask,
600
+ position_ids=position_ids,
601
+ past_key_value=past_key_value,
602
+ cache_position=cache_position,
603
+ **kwargs,
604
+ )
605
+ hidden_states = residual + hidden_states
606
+
607
+ # Fully Connected
608
+ residual = hidden_states
609
+ hidden_states = self.post_attention_layernorm(hidden_states)
610
+ if self.use_moe:
611
+ share_output = self.share_expert(hidden_states)
612
+ moe_output = self.moe(hidden_states)
613
+ ffn_output = moe_output + share_output
614
+ else:
615
+ ffn_output = self.mlp(hidden_states)
616
+ if isinstance(ffn_output, tuple):
617
+ hidden_states, _ = ffn_output
618
+ else:
619
+ hidden_states = ffn_output
620
+
621
+ hidden_states = residual + hidden_states
622
+ return hidden_states
623
+
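+ # MoE layers run a dense shared expert alongside the routed experts and sum
+ # the two paths, so every token receives a baseline transformation even when
+ # the router spreads its top-k picks thinly.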
624
+
625
+ class Step3p5PreTrainedModel(PreTrainedModel):
626
+ # Link this model family to its configuration class so PreTrainedModel.from_pretrained
627
+ # can load the config instead of failing with a NoneType error.
628
+ config_class = Step3p5Config
629
+ supports_gradient_checkpointing = True
630
+ _skip_keys_device_placement = ["past_key_values"]
631
+ _keys_to_ignore_on_load_unexpected = [
632
+ r"model\.layers\.45\.*",
633
+ r"model\.layers\.46\.*",
634
+ r"model\.layers\.47\.*"
635
+ ]
636
+ _supports_flash_attn = False
637
+ _supports_sdpa = True
638
+ _supports_flex_attn = True
639
+ _supports_static_cache = True
640
+ _supports_attention_backend = True
641
+
642
+
643
+ class Step3p5Model(Step3p5PreTrainedModel):
+ # GenerationMixin and lm_head weight tying belong on Step3p5ForCausalLM;
+ # the bare decoder stack has no lm_head to tie.
+ _no_split_modules = ["Step3p5DecoderLayer"]
+ base_model_prefix = "model"
+ config: Step3p5Config
+
648
+ def __init__(self, config: Step3p5Config):
649
+ super().__init__(config)
650
+ self.padding_idx = config.pad_token_id
651
+ self.vocab_size = config.vocab_size
652
+
653
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
654
+ self.padding_idx)
655
+ self.layers = nn.ModuleList([
656
+ Step3p5DecoderLayer(config, layer_idx)
657
+ for layer_idx in range(config.num_hidden_layers)
658
+ ])
659
+ self.norm = Step3p5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
660
+ self.gradient_checkpointing = False
661
+ self.has_sliding_layers = "sliding_attention" in self.config.layer_types
662
+
663
+ # Initialize weights and apply final processing
664
+ self.post_init()
665
+
666
+ def get_input_embeddings(self):
+ # Standard HF accessor: return the module itself. The old (self, input_ids)
+ # signature broke ForCausalLM.get_input_embeddings(), which passes no args.
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
668
+
669
+ @can_return_tuple
670
+ def forward(
671
+ self,
672
+ input_ids: torch.LongTensor = None,
673
+ attention_mask: Optional[torch.Tensor] = None,
674
+ position_ids: Optional[torch.LongTensor] = None,
675
+ past_key_values: Optional[Cache] = None,
676
+ inputs_embeds: Optional[torch.FloatTensor] = None,
677
+ use_cache: Optional[bool] = None,
678
+ output_attentions: Optional[bool] = None,
679
+ output_hidden_states: Optional[bool] = None,
680
+ return_dict: Optional[bool] = None,
681
+ cache_position: Optional[torch.LongTensor] = None,
682
+ **kwargs: Unpack[TransformersKwargs],
683
+ ) -> Union[tuple, BaseModelOutputWithPast]:
684
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
685
+ output_hidden_states = (output_hidden_states
686
+ if output_hidden_states is not None else
687
+ self.config.output_hidden_states)
688
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
690
+ if (input_ids is None) ^ (inputs_embeds is not None):
691
+ raise ValueError(
692
+ "You must specify exactly one of input_ids or inputs_embeds")
693
+
694
+ if self.gradient_checkpointing and self.training and use_cache:
695
+ logger.warning_once(
696
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
697
+ )
698
+ use_cache = False
699
+
700
+ if inputs_embeds is None:
701
+ inputs_embeds = self.embed_tokens(
702
+ input_ids.to(self.embed_tokens.weight.device))
703
+
704
+ if use_cache and past_key_values is None:
705
+ past_key_values = DynamicCache()
706
+
707
+ if cache_position is None:
708
+ past_seen_tokens = past_key_values.get_seq_length(
709
+ ) if past_key_values is not None else 0
710
+ cache_position = torch.arange(past_seen_tokens,
711
+ past_seen_tokens +
712
+ inputs_embeds.shape[1],
713
+ device=inputs_embeds.device)
714
+
715
+ if position_ids is None:
716
+ position_ids = cache_position.unsqueeze(0)
717
+
718
+ hidden_states = inputs_embeds
719
+
720
+ # It may already have been prepared by e.g. `generate`
721
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
722
+ # Prepare mask arguments
723
+ mask_kwargs = {
724
+ "config": self.config,
725
+ "input_embeds": inputs_embeds,
726
+ "attention_mask": attention_mask,
727
+ "cache_position": cache_position,
728
+ "past_key_values": past_key_values,
729
+ "position_ids": position_ids,
730
+ }
731
+ # Create the masks
732
+ causal_mask_mapping = {
733
+ "full_attention": create_causal_mask(**mask_kwargs),
734
+ }
735
+
736
+ # The sliding window alternating layers are not always activated depending on the config
737
+ if self.has_sliding_layers:
738
+ causal_mask_mapping[
739
+ "sliding_attention"] = create_sliding_window_causal_mask(
740
+ **mask_kwargs)
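+ # Each decoder layer later selects its mask via `decoder_layer.attention_type`,
+ # so full-attention and sliding-window layers can coexist in a single forward
+ # pass while sharing the same cache_position bookkeeping.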
741
+
742
+ # decoder layers
744
+ all_hidden_states = () if output_hidden_states else None
745
+ all_self_attns = () if output_attentions else None
746
+ for decoder_layer in self.layers[:self.config.num_hidden_layers]:
747
+ if output_hidden_states:
748
+ all_hidden_states += (hidden_states, )
749
+
750
+ layer_outputs = decoder_layer(
751
+ hidden_states,
752
+ attention_mask=causal_mask_mapping[
753
+ decoder_layer.attention_type],
754
+ position_ids=position_ids,
755
+ past_key_value=past_key_values,
756
+ output_attentions=output_attentions,
757
+ use_cache=use_cache,
758
+ cache_position=cache_position,
759
+ **kwargs,
760
+ )
761
+
762
+ hidden_states = layer_outputs
763
+
764
+ hidden_states = self.norm(hidden_states)
765
+
766
+ return BaseModelOutputWithPast(
767
+ last_hidden_state=hidden_states,
768
+ past_key_values=past_key_values if use_cache else None,
769
+ hidden_states=all_hidden_states,
770
+ attentions=all_self_attns,
771
+ )
772
+
773
+
774
+ class Step3p5ForCausalLM(Step3p5PreTrainedModel, GenerationMixin):
775
+ _tied_weights_keys = ["lm_head.weight"]
776
+ config: Step3p5Config
777
+
778
+ def __init__(self, config: Step3p5Config):
779
+ super().__init__(config)
780
+ self.model = Step3p5Model(config)
781
+ self.lm_head = nn.Linear(config.hidden_size,
782
+ config.vocab_size,
783
+ bias=False)
784
+
785
+ self.post_init()
786
+
787
+ # Delegate to concrete attributes: Step3p5Model does not define the
+ # output-embedding or decoder helpers these used to call, so the previous
+ # indirection raised AttributeError.
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
804
+
805
+ def forward(
806
+ self,
807
+ input_ids: torch.LongTensor = None,
808
+ num_patches=None,
809
+ patch_pixel_values=None,
810
+ patch_newline_mask=None,
811
+ attention_mask: Optional[torch.Tensor] = None,
812
+ position_ids: Optional[torch.LongTensor] = None,
813
+ past_key_values: Optional[Cache] = None,
814
+ inputs_embeds: Optional[torch.FloatTensor] = None,
815
+ labels: Optional[torch.LongTensor] = None,
816
+ use_cache: Optional[bool] = None,
817
+ output_attentions: Optional[bool] = None,
818
+ output_hidden_states: Optional[bool] = None,
819
+ return_dict: Optional[bool] = None,
820
+ cache_position: Optional[torch.LongTensor] = None,
821
+ **kwargs: Unpack[TransformersKwargs],
822
+ ) -> Union[tuple, Step3p5CausalLMOutputWithPast]:
823
+ r"""
824
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
825
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
826
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
827
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
828
+ Example:
829
+ ```python
830
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+ >>> # The path below is a placeholder for this repository's checkpoint id.
+ >>> model = AutoModelForCausalLM.from_pretrained("path/to/step3p5-checkpoint", trust_remote_code=True)
+ >>> tokenizer = AutoTokenizer.from_pretrained("path/to/step3p5-checkpoint", trust_remote_code=True)
833
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
834
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
835
+ >>> # Generate
836
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
837
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
838
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
839
+ ```"""
840
+
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (output_hidden_states
843
+ if output_hidden_states is not None else
844
+ self.config.output_hidden_states)
846
+ outputs = self.model(
847
+ input_ids=input_ids,
848
+ num_patches=num_patches,
849
+ patch_pixel_values=patch_pixel_values,
850
+ patch_newline_mask=patch_newline_mask,
851
+ position_ids=position_ids,
852
+ attention_mask=attention_mask,
853
+ past_key_values=past_key_values,
854
+ inputs_embeds=inputs_embeds,
855
+ use_cache=use_cache,
856
+ output_attentions=output_attentions,
857
+ output_hidden_states=output_hidden_states,
858
+ return_dict=return_dict,
859
+ cache_position=cache_position,
860
+ **kwargs,
861
+ )
862
+ hidden_states = outputs.last_hidden_state
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shifted next-token cross-entropy; -100 labels are ignored (see docstring).
+ loss = nn.functional.cross_entropy(
+ logits[..., :-1, :].reshape(-1, self.config.vocab_size).float(),
+ labels[..., 1:].reshape(-1),
+ ignore_index=-100)
+
+ # Propagate the cache as well: returning only the logits silently dropped
+ # `past_key_values` and broke cached decoding.
+ return Step3p5CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions)
866
+
867
+ def prepare_inputs_for_generation(
868
+ self,
869
+ input_ids,
870
+ past_key_values=None,
871
+ inputs_embeds=None,
872
+ pixel_values=None,
873
+ attention_mask=None,
874
+ cache_position=None,
875
+ logits_to_keep=None,
876
+ **kwargs,
877
+ ):
878
+
879
+ model_inputs = super().prepare_inputs_for_generation(
880
+ input_ids,
881
+ past_key_values=past_key_values,
882
+ inputs_embeds=inputs_embeds,
883
+ attention_mask=attention_mask,
884
+ cache_position=cache_position,
885
+ logits_to_keep=logits_to_keep,
886
+ **kwargs,
887
+ )
888
+
889
+ if cache_position[0] == 0:
890
+ # Pixel values are only needed on the first (prefill) step, when the input
+ # ids still contain the special image tokens; during cached decoding they
+ # are left out of `model_inputs` entirely.
892
+ model_inputs["pixel_values"] = pixel_values
893
+
894
+ return model_inputs
895
+
896
+ def _fix_state_dict_key_on_load(self, key: str) -> tuple[str, bool]:
897
+ if key.startswith("language_model."):
898
+ return key[len("language_model."):], True
899
+
900
+ return key, False
quantization_config.json ADDED
@@ -0,0 +1,959 @@
1
+ {
2
+ "bits": 4,
3
+ "data_type": "int",
4
+ "group_size": 128,
5
+ "sym": true,
6
+ "iters": 0,
7
+ "autoround_version": "0.12.0",
8
+ "quant_method": "auto-round",
9
+ "packing_format": "auto_round:auto_gptq",
10
+ "extra_config": {
11
+ "model.layers.0.self_attn.g_proj": {
12
+ "bits": 16,
13
+ "data_type": "float"
14
+ },
15
+ "model.layers.1.self_attn.g_proj": {
16
+ "bits": 16,
17
+ "data_type": "float"
18
+ },
19
+ "model.layers.2.self_attn.g_proj": {
20
+ "bits": 16,
21
+ "data_type": "float"
22
+ },
23
+ "model.layers.3.self_attn.g_proj": {
24
+ "bits": 16,
25
+ "data_type": "float"
26
+ },
27
+ "model.layers.4.self_attn.g_proj": {
28
+ "bits": 16,
29
+ "data_type": "float"
30
+ },
31
+ "model.layers.5.self_attn.g_proj": {
32
+ "bits": 16,
33
+ "data_type": "float"
34
+ },
35
+ "model.layers.6.self_attn.g_proj": {
36
+ "bits": 16,
37
+ "data_type": "float"
38
+ },
39
+ "model.layers.7.self_attn.g_proj": {
40
+ "bits": 16,
41
+ "data_type": "float"
42
+ },
43
+ "model.layers.8.self_attn.g_proj": {
44
+ "bits": 16,
45
+ "data_type": "float"
46
+ },
47
+ "model.layers.9.self_attn.g_proj": {
48
+ "bits": 16,
49
+ "data_type": "float"
50
+ },
51
+ "model.layers.10.self_attn.g_proj": {
52
+ "bits": 16,
53
+ "data_type": "float"
54
+ },
55
+ "model.layers.11.self_attn.g_proj": {
56
+ "bits": 16,
57
+ "data_type": "float"
58
+ },
59
+ "model.layers.12.self_attn.g_proj": {
60
+ "bits": 16,
61
+ "data_type": "float"
62
+ },
63
+ "model.layers.13.self_attn.g_proj": {
64
+ "bits": 16,
65
+ "data_type": "float"
66
+ },
67
+ "model.layers.14.self_attn.g_proj": {
68
+ "bits": 16,
69
+ "data_type": "float"
70
+ },
71
+ "model.layers.15.self_attn.g_proj": {
72
+ "bits": 16,
73
+ "data_type": "float"
74
+ },
75
+ "model.layers.16.self_attn.g_proj": {
76
+ "bits": 16,
77
+ "data_type": "float"
78
+ },
79
+ "model.layers.17.self_attn.g_proj": {
80
+ "bits": 16,
81
+ "data_type": "float"
82
+ },
83
+ "model.layers.18.self_attn.g_proj": {
84
+ "bits": 16,
85
+ "data_type": "float"
86
+ },
87
+ "model.layers.19.self_attn.g_proj": {
88
+ "bits": 16,
89
+ "data_type": "float"
90
+ },
91
+ "model.layers.20.self_attn.g_proj": {
92
+ "bits": 16,
93
+ "data_type": "float"
94
+ },
95
+ "model.layers.21.self_attn.g_proj": {
96
+ "bits": 16,
97
+ "data_type": "float"
98
+ },
99
+ "model.layers.22.self_attn.g_proj": {
100
+ "bits": 16,
101
+ "data_type": "float"
102
+ },
103
+ "model.layers.23.self_attn.g_proj": {
104
+ "bits": 16,
105
+ "data_type": "float"
106
+ },
107
+ "model.layers.24.self_attn.g_proj": {
108
+ "bits": 16,
109
+ "data_type": "float"
110
+ },
111
+ "model.layers.25.self_attn.g_proj": {
112
+ "bits": 16,
113
+ "data_type": "float"
114
+ },
115
+ "model.layers.26.self_attn.g_proj": {
116
+ "bits": 16,
117
+ "data_type": "float"
118
+ },
119
+ "model.layers.27.self_attn.g_proj": {
120
+ "bits": 16,
121
+ "data_type": "float"
122
+ },
123
+ "model.layers.28.self_attn.g_proj": {
124
+ "bits": 16,
125
+ "data_type": "float"
126
+ },
127
+ "model.layers.29.self_attn.g_proj": {
128
+ "bits": 16,
129
+ "data_type": "float"
130
+ },
131
+ "model.layers.30.self_attn.g_proj": {
132
+ "bits": 16,
133
+ "data_type": "float"
134
+ },
135
+ "model.layers.31.self_attn.g_proj": {
136
+ "bits": 16,
137
+ "data_type": "float"
138
+ },
139
+ "model.layers.32.self_attn.g_proj": {
140
+ "bits": 16,
141
+ "data_type": "float"
142
+ },
143
+ "model.layers.33.self_attn.g_proj": {
144
+ "bits": 16,
145
+ "data_type": "float"
146
+ },
147
+ "model.layers.34.self_attn.g_proj": {
148
+ "bits": 16,
149
+ "data_type": "float"
150
+ },
151
+ "model.layers.35.self_attn.g_proj": {
152
+ "bits": 16,
153
+ "data_type": "float"
154
+ },
155
+ "model.layers.36.self_attn.g_proj": {
156
+ "bits": 16,
157
+ "data_type": "float"
158
+ },
159
+ "model.layers.37.self_attn.g_proj": {
160
+ "bits": 16,
161
+ "data_type": "float"
162
+ },
163
+ "model.layers.38.self_attn.g_proj": {
164
+ "bits": 16,
165
+ "data_type": "float"
166
+ },
167
+ "model.layers.39.self_attn.g_proj": {
168
+ "bits": 16,
169
+ "data_type": "float"
170
+ },
171
+ "model.layers.40.self_attn.g_proj": {
172
+ "bits": 16,
173
+ "data_type": "float"
174
+ },
175
+ "model.layers.41.self_attn.g_proj": {
176
+ "bits": 16,
177
+ "data_type": "float"
178
+ },
179
+ "model.layers.42.self_attn.g_proj": {
180
+ "bits": 16,
181
+ "data_type": "float"
182
+ },
183
+ "model.layers.43.self_attn.g_proj": {
184
+ "bits": 16,
185
+ "data_type": "float"
186
+ },
187
+ "model.layers.44.self_attn.g_proj": {
188
+ "bits": 16,
189
+ "data_type": "float"
190
+ },
191
+ "model.layers.3.moe.gate": {
192
+ "bits": 16,
193
+ "data_type": "float"
194
+ },
195
+ "model.layers.4.moe.gate": {
196
+ "bits": 16,
197
+ "data_type": "float"
198
+ },
199
+ "model.layers.5.moe.gate": {
200
+ "bits": 16,
201
+ "data_type": "float"
202
+ },
203
+ "model.layers.6.moe.gate": {
204
+ "bits": 16,
205
+ "data_type": "float"
206
+ },
207
+ "model.layers.7.moe.gate": {
208
+ "bits": 16,
209
+ "data_type": "float"
210
+ },
211
+ "model.layers.8.moe.gate": {
212
+ "bits": 16,
213
+ "data_type": "float"
214
+ },
215
+ "model.layers.9.moe.gate": {
216
+ "bits": 16,
217
+ "data_type": "float"
218
+ },
219
+ "model.layers.10.moe.gate": {
220
+ "bits": 16,
221
+ "data_type": "float"
222
+ },
223
+ "model.layers.11.moe.gate": {
224
+ "bits": 16,
225
+ "data_type": "float"
226
+ },
227
+ "model.layers.12.moe.gate": {
228
+ "bits": 16,
229
+ "data_type": "float"
230
+ },
231
+ "model.layers.13.moe.gate": {
232
+ "bits": 16,
233
+ "data_type": "float"
234
+ },
235
+ "model.layers.14.moe.gate": {
236
+ "bits": 16,
237
+ "data_type": "float"
238
+ },
239
+ "model.layers.15.moe.gate": {
240
+ "bits": 16,
241
+ "data_type": "float"
242
+ },
243
+ "model.layers.16.moe.gate": {
244
+ "bits": 16,
245
+ "data_type": "float"
246
+ },
247
+ "model.layers.17.moe.gate": {
248
+ "bits": 16,
249
+ "data_type": "float"
250
+ },
251
+ "model.layers.18.moe.gate": {
252
+ "bits": 16,
253
+ "data_type": "float"
254
+ },
255
+ "model.layers.19.moe.gate": {
256
+ "bits": 16,
257
+ "data_type": "float"
258
+ },
259
+ "model.layers.20.moe.gate": {
260
+ "bits": 16,
261
+ "data_type": "float"
262
+ },
263
+ "model.layers.21.moe.gate": {
264
+ "bits": 16,
265
+ "data_type": "float"
266
+ },
267
+ "model.layers.22.moe.gate": {
268
+ "bits": 16,
269
+ "data_type": "float"
270
+ },
271
+ "model.layers.23.moe.gate": {
272
+ "bits": 16,
273
+ "data_type": "float"
274
+ },
275
+ "model.layers.24.moe.gate": {
276
+ "bits": 16,
277
+ "data_type": "float"
278
+ },
279
+ "model.layers.25.moe.gate": {
280
+ "bits": 16,
281
+ "data_type": "float"
282
+ },
283
+ "model.layers.26.moe.gate": {
284
+ "bits": 16,
285
+ "data_type": "float"
286
+ },
287
+ "model.layers.27.moe.gate": {
288
+ "bits": 16,
289
+ "data_type": "float"
290
+ },
291
+ "model.layers.28.moe.gate": {
292
+ "bits": 16,
293
+ "data_type": "float"
294
+ },
295
+ "model.layers.29.moe.gate": {
296
+ "bits": 16,
297
+ "data_type": "float"
298
+ },
299
+ "model.layers.30.moe.gate": {
300
+ "bits": 16,
301
+ "data_type": "float"
302
+ },
303
+ "model.layers.31.moe.gate": {
304
+ "bits": 16,
305
+ "data_type": "float"
306
+ },
307
+ "model.layers.32.moe.gate": {
308
+ "bits": 16,
309
+ "data_type": "float"
310
+ },
311
+ "model.layers.33.moe.gate": {
312
+ "bits": 16,
313
+ "data_type": "float"
314
+ },
315
+ "model.layers.34.moe.gate": {
316
+ "bits": 16,
317
+ "data_type": "float"
318
+ },
319
+ "model.layers.35.moe.gate": {
320
+ "bits": 16,
321
+ "data_type": "float"
322
+ },
323
+ "model.layers.36.moe.gate": {
324
+ "bits": 16,
325
+ "data_type": "float"
326
+ },
327
+ "model.layers.37.moe.gate": {
328
+ "bits": 16,
329
+ "data_type": "float"
330
+ },
331
+ "model.layers.38.moe.gate": {
332
+ "bits": 16,
333
+ "data_type": "float"
334
+ },
335
+ "model.layers.39.moe.gate": {
336
+ "bits": 16,
337
+ "data_type": "float"
338
+ },
339
+ "model.layers.40.moe.gate": {
340
+ "bits": 16,
341
+ "data_type": "float"
342
+ },
343
+ "model.layers.41.moe.gate": {
344
+ "bits": 16,
345
+ "data_type": "float"
346
+ },
347
+ "model.layers.42.moe.gate": {
348
+ "bits": 16,
349
+ "data_type": "float"
350
+ },
351
+ "model.layers.43.moe.gate": {
352
+ "bits": 16,
353
+ "data_type": "float"
354
+ },
355
+ "model.layers.44.moe.gate": {
356
+ "bits": 16,
357
+ "data_type": "float"
358
+ },
359
+ "model.layers.0.mlp.gate_proj": {
360
+ "bits": 8
361
+ },
362
+ "model.layers.0.mlp.up_proj": {
363
+ "bits": 8
364
+ },
365
+ "model.layers.0.mlp.down_proj": {
366
+ "bits": 8
367
+ },
368
+ "model.layers.1.mlp.gate_proj": {
369
+ "bits": 8
370
+ },
371
+ "model.layers.1.mlp.up_proj": {
372
+ "bits": 8
373
+ },
374
+ "model.layers.1.mlp.down_proj": {
375
+ "bits": 8
376
+ },
377
+ "model.layers.2.mlp.gate_proj": {
378
+ "bits": 8
379
+ },
380
+ "model.layers.2.mlp.up_proj": {
381
+ "bits": 8
382
+ },
383
+ "model.layers.2.mlp.down_proj": {
384
+ "bits": 8
385
+ },
386
+ "model.layers.0.self_attn.q_proj": {
387
+ "bits": 8
388
+ },
389
+ "model.layers.0.self_attn.k_proj": {
390
+ "bits": 8
391
+ },
392
+ "model.layers.0.self_attn.v_proj": {
393
+ "bits": 8
394
+ },
395
+ "model.layers.0.self_attn.o_proj": {
396
+ "bits": 8
397
+ },
398
+ "model.layers.1.self_attn.q_proj": {
399
+ "bits": 8
400
+ },
401
+ "model.layers.1.self_attn.k_proj": {
402
+ "bits": 8
403
+ },
404
+ "model.layers.1.self_attn.v_proj": {
405
+ "bits": 8
406
+ },
407
+ "model.layers.1.self_attn.o_proj": {
408
+ "bits": 8
409
+ },
410
+ "model.layers.2.self_attn.q_proj": {
411
+ "bits": 8
412
+ },
413
+ "model.layers.2.self_attn.k_proj": {
414
+ "bits": 8
415
+ },
416
+ "model.layers.2.self_attn.v_proj": {
417
+ "bits": 8
418
+ },
419
+ "model.layers.2.self_attn.o_proj": {
420
+ "bits": 8
421
+ },
422
+ "model.layers.3.self_attn.q_proj": {
423
+ "bits": 8
424
+ },
425
+ "model.layers.3.self_attn.k_proj": {
426
+ "bits": 8
427
+ },
428
+ "model.layers.3.self_attn.v_proj": {
429
+ "bits": 8
430
+ },
431
+ "model.layers.3.self_attn.o_proj": {
432
+ "bits": 8
433
+ },
434
+ "model.layers.4.self_attn.q_proj": {
435
+ "bits": 8
436
+ },
437
+ "model.layers.4.self_attn.k_proj": {
438
+ "bits": 8
439
+ },
440
+ "model.layers.4.self_attn.v_proj": {
441
+ "bits": 8
442
+ },
443
+ "model.layers.4.self_attn.o_proj": {
444
+ "bits": 8
445
+ },
446
+ "model.layers.5.self_attn.q_proj": {
447
+ "bits": 8
448
+ },
449
+ "model.layers.5.self_attn.k_proj": {
450
+ "bits": 8
451
+ },
452
+ "model.layers.5.self_attn.v_proj": {
453
+ "bits": 8
454
+ },
455
+ "model.layers.5.self_attn.o_proj": {
456
+ "bits": 8
457
+ },
458
+ "model.layers.6.self_attn.q_proj": {
459
+ "bits": 8
460
+ },
461
+ "model.layers.6.self_attn.k_proj": {
462
+ "bits": 8
463
+ },
464
+ "model.layers.6.self_attn.v_proj": {
465
+ "bits": 8
466
+ },
467
+ "model.layers.6.self_attn.o_proj": {
468
+ "bits": 8
469
+ },
470
+ "model.layers.7.self_attn.q_proj": {
471
+ "bits": 8
472
+ },
473
+ "model.layers.7.self_attn.k_proj": {
474
+ "bits": 8
475
+ },
476
+ "model.layers.7.self_attn.v_proj": {
477
+ "bits": 8
478
+ },
479
+ "model.layers.7.self_attn.o_proj": {
480
+ "bits": 8
481
+ },
482
+ "model.layers.8.self_attn.q_proj": {
483
+ "bits": 8
484
+ },
485
+ "model.layers.8.self_attn.k_proj": {
486
+ "bits": 8
487
+ },
488
+ "model.layers.8.self_attn.v_proj": {
489
+ "bits": 8
490
+ },
491
+ "model.layers.8.self_attn.o_proj": {
492
+ "bits": 8
493
+ },
494
+ "model.layers.9.self_attn.q_proj": {
495
+ "bits": 8
496
+ },
497
+ "model.layers.9.self_attn.k_proj": {
498
+ "bits": 8
499
+ },
500
+ "model.layers.9.self_attn.v_proj": {
501
+ "bits": 8
502
+ },
503
+ "model.layers.9.self_attn.o_proj": {
504
+ "bits": 8
505
+ },
506
+ "model.layers.10.self_attn.q_proj": {
507
+ "bits": 8
508
+ },
509
+ "model.layers.10.self_attn.k_proj": {
510
+ "bits": 8
511
+ },
512
+ "model.layers.10.self_attn.v_proj": {
513
+ "bits": 8
514
+ },
515
+ "model.layers.10.self_attn.o_proj": {
516
+ "bits": 8
517
+ },
518
+ "model.layers.11.self_attn.q_proj": {
519
+ "bits": 8
520
+ },
521
+ "model.layers.11.self_attn.k_proj": {
522
+ "bits": 8
523
+ },
524
+ "model.layers.11.self_attn.v_proj": {
525
+ "bits": 8
526
+ },
527
+ "model.layers.11.self_attn.o_proj": {
528
+ "bits": 8
529
+ },
530
+ "model.layers.12.self_attn.q_proj": {
531
+ "bits": 8
532
+ },
533
+ "model.layers.12.self_attn.k_proj": {
534
+ "bits": 8
535
+ },
536
+ "model.layers.12.self_attn.v_proj": {
537
+ "bits": 8
538
+ },
539
+ "model.layers.12.self_attn.o_proj": {
540
+ "bits": 8
541
+ },
542
+ "model.layers.13.self_attn.q_proj": {
543
+ "bits": 8
544
+ },
545
+ "model.layers.13.self_attn.k_proj": {
546
+ "bits": 8
547
+ },
548
+ "model.layers.13.self_attn.v_proj": {
549
+ "bits": 8
550
+ },
551
+ "model.layers.13.self_attn.o_proj": {
552
+ "bits": 8
553
+ },
554
+ "model.layers.14.self_attn.q_proj": {
555
+ "bits": 8
556
+ },
557
+ "model.layers.14.self_attn.k_proj": {
558
+ "bits": 8
559
+ },
560
+ "model.layers.14.self_attn.v_proj": {
561
+ "bits": 8
562
+ },
563
+ "model.layers.14.self_attn.o_proj": {
564
+ "bits": 8
565
+ },
566
+ "model.layers.15.self_attn.q_proj": {
567
+ "bits": 8
568
+ },
569
+ "model.layers.15.self_attn.k_proj": {
570
+ "bits": 8
571
+ },
572
+ "model.layers.15.self_attn.v_proj": {
573
+ "bits": 8
574
+ },
575
+ "model.layers.15.self_attn.o_proj": {
576
+ "bits": 8
577
+ },
578
+ "model.layers.16.self_attn.q_proj": {
579
+ "bits": 8
580
+ },
581
+ "model.layers.16.self_attn.k_proj": {
582
+ "bits": 8
583
+ },
584
+ "model.layers.16.self_attn.v_proj": {
585
+ "bits": 8
586
+ },
587
+ "model.layers.16.self_attn.o_proj": {
588
+ "bits": 8
589
+ },
590
+ "model.layers.17.self_attn.q_proj": {
591
+ "bits": 8
592
+ },
593
+ "model.layers.17.self_attn.k_proj": {
594
+ "bits": 8
595
+ },
596
+ "model.layers.17.self_attn.v_proj": {
597
+ "bits": 8
598
+ },
599
+ "model.layers.17.self_attn.o_proj": {
600
+ "bits": 8
601
+ },
602
+ "model.layers.18.self_attn.q_proj": {
603
+ "bits": 8
604
+ },
605
+ "model.layers.18.self_attn.k_proj": {
606
+ "bits": 8
607
+ },
608
+ "model.layers.18.self_attn.v_proj": {
609
+ "bits": 8
610
+ },
611
+ "model.layers.18.self_attn.o_proj": {
612
+ "bits": 8
613
+ },
614
+ "model.layers.19.self_attn.q_proj": {
615
+ "bits": 8
616
+ },
617
+ "model.layers.19.self_attn.k_proj": {
618
+ "bits": 8
619
+ },
620
+ "model.layers.19.self_attn.v_proj": {
621
+ "bits": 8
622
+ },
623
+ "model.layers.19.self_attn.o_proj": {
624
+ "bits": 8
625
+ },
626
+ "model.layers.20.self_attn.q_proj": {
627
+ "bits": 8
628
+ },
629
+ "model.layers.20.self_attn.k_proj": {
630
+ "bits": 8
631
+ },
632
+ "model.layers.20.self_attn.v_proj": {
633
+ "bits": 8
634
+ },
635
+ "model.layers.20.self_attn.o_proj": {
636
+ "bits": 8
637
+ },
638
+ "model.layers.21.self_attn.q_proj": {
639
+ "bits": 8
640
+ },
641
+ "model.layers.21.self_attn.k_proj": {
642
+ "bits": 8
643
+ },
644
+ "model.layers.21.self_attn.v_proj": {
645
+ "bits": 8
646
+ },
647
+ "model.layers.21.self_attn.o_proj": {
648
+ "bits": 8
649
+ },
650
+ "model.layers.22.self_attn.q_proj": {
651
+ "bits": 8
652
+ },
653
+ "model.layers.22.self_attn.k_proj": {
654
+ "bits": 8
655
+ },
656
+ "model.layers.22.self_attn.v_proj": {
657
+ "bits": 8
658
+ },
659
+ "model.layers.22.self_attn.o_proj": {
660
+ "bits": 8
661
+ },
662
+ "model.layers.23.self_attn.q_proj": {
663
+ "bits": 8
664
+ },
665
+ "model.layers.23.self_attn.k_proj": {
666
+ "bits": 8
667
+ },
668
+ "model.layers.23.self_attn.v_proj": {
669
+ "bits": 8
670
+ },
671
+ "model.layers.23.self_attn.o_proj": {
672
+ "bits": 8
673
+ },
674
+ "model.layers.24.self_attn.q_proj": {
675
+ "bits": 8
676
+ },
677
+ "model.layers.24.self_attn.k_proj": {
678
+ "bits": 8
679
+ },
680
+ "model.layers.24.self_attn.v_proj": {
681
+ "bits": 8
682
+ },
683
+ "model.layers.24.self_attn.o_proj": {
684
+ "bits": 8
685
+ },
686
+ "model.layers.25.self_attn.q_proj": {
687
+ "bits": 8
688
+ },
689
+ "model.layers.25.self_attn.k_proj": {
690
+ "bits": 8
691
+ },
692
+ "model.layers.25.self_attn.v_proj": {
693
+ "bits": 8
694
+ },
695
+ "model.layers.25.self_attn.o_proj": {
696
+ "bits": 8
697
+ },
698
+ "model.layers.26.self_attn.q_proj": {
699
+ "bits": 8
700
+ },
701
+ "model.layers.26.self_attn.k_proj": {
702
+ "bits": 8
703
+ },
704
+ "model.layers.26.self_attn.v_proj": {
705
+ "bits": 8
706
+ },
707
+ "model.layers.26.self_attn.o_proj": {
708
+ "bits": 8
709
+ },
710
+ "model.layers.27.self_attn.q_proj": {
711
+ "bits": 8
712
+ },
713
+ "model.layers.27.self_attn.k_proj": {
714
+ "bits": 8
715
+ },
716
+ "model.layers.27.self_attn.v_proj": {
717
+ "bits": 8
718
+ },
719
+ "model.layers.27.self_attn.o_proj": {
720
+ "bits": 8
721
+ },
722
+ "model.layers.28.self_attn.q_proj": {
723
+ "bits": 8
724
+ },
725
+ "model.layers.28.self_attn.k_proj": {
726
+ "bits": 8
727
+ },
728
+ "model.layers.28.self_attn.v_proj": {
729
+ "bits": 8
730
+ },
731
+ "model.layers.28.self_attn.o_proj": {
732
+ "bits": 8
733
+ },
734
+ "model.layers.29.self_attn.q_proj": {
735
+ "bits": 8
736
+ },
737
+ "model.layers.29.self_attn.k_proj": {
738
+ "bits": 8
739
+ },
740
+ "model.layers.29.self_attn.v_proj": {
741
+ "bits": 8
742
+ },
743
+ "model.layers.29.self_attn.o_proj": {
744
+ "bits": 8
745
+ },
746
+ "model.layers.30.self_attn.q_proj": {
747
+ "bits": 8
748
+ },
749
+ "model.layers.30.self_attn.k_proj": {
750
+ "bits": 8
751
+ },
752
+ "model.layers.30.self_attn.v_proj": {
753
+ "bits": 8
754
+ },
755
+ "model.layers.30.self_attn.o_proj": {
756
+ "bits": 8
757
+ },
758
+ "model.layers.31.self_attn.q_proj": {
759
+ "bits": 8
760
+ },
761
+ "model.layers.31.self_attn.k_proj": {
762
+ "bits": 8
763
+ },
764
+ "model.layers.31.self_attn.v_proj": {
765
+ "bits": 8
766
+ },
767
+ "model.layers.31.self_attn.o_proj": {
768
+ "bits": 8
769
+ },
770
+ "model.layers.32.self_attn.q_proj": {
771
+ "bits": 8
772
+ },
773
+ "model.layers.32.self_attn.k_proj": {
774
+ "bits": 8
775
+ },
776
+ "model.layers.32.self_attn.v_proj": {
777
+ "bits": 8
778
+ },
779
+ "model.layers.32.self_attn.o_proj": {
780
+ "bits": 8
781
+ },
782
+ "model.layers.33.self_attn.q_proj": {
783
+ "bits": 8
784
+ },
785
+ "model.layers.33.self_attn.k_proj": {
786
+ "bits": 8
787
+ },
788
+ "model.layers.33.self_attn.v_proj": {
789
+ "bits": 8
790
+ },
791
+ "model.layers.33.self_attn.o_proj": {
792
+ "bits": 8
793
+ },
794
+ "model.layers.34.self_attn.q_proj": {
795
+ "bits": 8
796
+ },
797
+ "model.layers.34.self_attn.k_proj": {
798
+ "bits": 8
799
+ },
800
+ "model.layers.34.self_attn.v_proj": {
801
+ "bits": 8
802
+ },
803
+ "model.layers.34.self_attn.o_proj": {
804
+ "bits": 8
805
+ },
806
+ "model.layers.35.self_attn.q_proj": {
807
+ "bits": 8
808
+ },
809
+ "model.layers.35.self_attn.k_proj": {
810
+ "bits": 8
811
+ },
812
+ "model.layers.35.self_attn.v_proj": {
813
+ "bits": 8
814
+ },
815
+ "model.layers.35.self_attn.o_proj": {
816
+ "bits": 8
817
+ },
818
+ "model.layers.36.self_attn.q_proj": {
819
+ "bits": 8
820
+ },
821
+ "model.layers.36.self_attn.k_proj": {
822
+ "bits": 8
823
+ },
824
+ "model.layers.36.self_attn.v_proj": {
825
+ "bits": 8
826
+ },
827
+ "model.layers.36.self_attn.o_proj": {
828
+ "bits": 8
829
+ },
830
+ "model.layers.37.self_attn.q_proj": {
831
+ "bits": 8
832
+ },
833
+ "model.layers.37.self_attn.k_proj": {
834
+ "bits": 8
835
+ },
836
+ "model.layers.37.self_attn.v_proj": {
837
+ "bits": 8
838
+ },
839
+ "model.layers.37.self_attn.o_proj": {
840
+ "bits": 8
841
+ },
842
+ "model.layers.38.self_attn.q_proj": {
843
+ "bits": 8
844
+ },
845
+ "model.layers.38.self_attn.k_proj": {
846
+ "bits": 8
847
+ },
848
+ "model.layers.38.self_attn.v_proj": {
849
+ "bits": 8
850
+ },
851
+ "model.layers.38.self_attn.o_proj": {
852
+ "bits": 8
853
+ },
854
+ "model.layers.39.self_attn.q_proj": {
855
+ "bits": 8
856
+ },
857
+ "model.layers.39.self_attn.k_proj": {
858
+ "bits": 8
859
+ },
860
+ "model.layers.39.self_attn.v_proj": {
861
+ "bits": 8
862
+ },
863
+ "model.layers.39.self_attn.o_proj": {
864
+ "bits": 8
865
+ },
866
+ "model.layers.40.self_attn.q_proj": {
867
+ "bits": 8
868
+ },
869
+ "model.layers.40.self_attn.k_proj": {
870
+ "bits": 8
871
+ },
872
+ "model.layers.40.self_attn.v_proj": {
873
+ "bits": 8
874
+ },
875
+ "model.layers.40.self_attn.o_proj": {
876
+ "bits": 8
877
+ },
878
+ "model.layers.41.self_attn.q_proj": {
879
+ "bits": 8
880
+ },
881
+ "model.layers.41.self_attn.k_proj": {
882
+ "bits": 8
883
+ },
884
+ "model.layers.41.self_attn.v_proj": {
885
+ "bits": 8
886
+ },
887
+ "model.layers.41.self_attn.o_proj": {
888
+ "bits": 8
889
+ },
890
+ "model.layers.42.self_attn.q_proj": {
891
+ "bits": 8
892
+ },
893
+ "model.layers.42.self_attn.k_proj": {
894
+ "bits": 8
895
+ },
896
+ "model.layers.42.self_attn.v_proj": {
897
+ "bits": 8
898
+ },
899
+ "model.layers.42.self_attn.o_proj": {
900
+ "bits": 8
901
+ },
902
+ "model.layers.43.self_attn.q_proj": {
903
+ "bits": 8
904
+ },
905
+ "model.layers.43.self_attn.k_proj": {
906
+ "bits": 8
907
+ },
908
+ "model.layers.43.self_attn.v_proj": {
909
+ "bits": 8
910
+ },
911
+ "model.layers.43.self_attn.o_proj": {
912
+ "bits": 8
913
+ },
914
+ "model.layers.44.self_attn.q_proj": {
915
+ "bits": 8
916
+ },
917
+ "model.layers.44.self_attn.k_proj": {
918
+ "bits": 8
919
+ },
920
+ "model.layers.44.self_attn.v_proj": {
921
+ "bits": 8
922
+ },
923
+ "model.layers.44.self_attn.o_proj": {
924
+ "bits": 8
925
+ },
926
+ ".*mlp.*": {
927
+ "bits": 8
928
+ },
929
+ ".*self_attn.*": {
930
+ "bits": 8
931
+ },
932
+ ".*layers\\.46.*": {
933
+ "bits": 8
934
+ },
935
+ ".*layers\\.47.*": {
936
+ "bits": 8
937
+ },
938
+ ".*eh_proj.*": {
939
+ "bits": 16,
940
+ "data_type": "float"
941
+ },
942
+ ".*shared_head.*": {
943
+ "bits": 16,
944
+ "data_type": "float"
945
+ },
946
+ ".*layers\\.45.*": {
947
+ "bits": 16,
948
+ "data_type": "float"
949
+ },
950
+ ".*g_proj.*": {
951
+ "bits": 16,
952
+ "data_type": "float"
953
+ },
954
+ ".*moe\\.gate.*": {
955
+ "bits": 16,
956
+ "data_type": "float"
957
+ }
958
+ }
959
+ }
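
The config above pins routing-sensitive modules (the per-layer `moe.gate` routers, the `g_proj` attention gates, `eh_proj`/`shared_head`, and the layer-45 block) to 16-bit float, keeps the dense early layers and all attention projections at 8-bit, and quantizes the remaining weights to 4-bit with group size 128. A minimal loading sketch, assuming the AutoRound-quantized checkpoint is consumed through transformers' standard `from_pretrained` path; the directory name is a placeholder, not a real hub id:

```python
# Hedged sketch: the path and device_map below are illustrative assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "./step3p5-int4-autoround"  # hypothetical local checkpoint directory
model = AutoModelForCausalLM.from_pretrained(
    path,
    device_map="auto",        # shard the dequantized modules across GPUs
    trust_remote_code=True,   # the Step3p5 classes ship inside the repo
)
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
```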
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<|begin▁of▁sentence|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|im_end|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|end▁of▁sentence|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
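
These definitions pair the DeepSeek-style `<|begin▁of▁sentence|>` BOS with the chat template's `<|im_end|>` EOS and a distinct `<|end▁of▁sentence|>` pad token. A quick sanity check, assuming the tokenizer loads from this folder (the path is again a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./step3p5-int4-autoround", trust_remote_code=True)
print(tok.bos_token, tok.eos_token, tok.pad_token)
# expected: <|begin▁of▁sentence|> <|im_end|> <|end▁of▁sentence|>
```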
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff