prince-canuma committed (verified)
Commit ea01961 · 1 Parent(s): fbbc69c

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
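For context, a rule like the one added above is what `git lfs track` writes to `.gitattributes`; a minimal sketch of producing it by hand (the upload-large-folder tool adds it automatically):

```bash
# Appends "tokenizer.json filter=lfs diff=lfs merge=lfs -text" to .gitattributes
git lfs track "tokenizer.json"
```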
README.md ADDED
@@ -0,0 +1,37 @@
+ ---
+ library_name: mlx
+ license: apache-2.0
+ license_link: https://huggingface.co/Qwen/Qwen3-Coder-Next/blob/main/LICENSE
+ pipeline_tag: text-generation
+ base_model: Qwen/Qwen3-Coder-Next
+ tags:
+ - mlx
+ ---
+
+ # mlx-community/Qwen3-Coder-Next-8bit
+
+ This model [mlx-community/Qwen3-Coder-Next-8bit](https://huggingface.co/mlx-community/Qwen3-Coder-Next-8bit) was
+ converted to MLX format from [Qwen/Qwen3-Coder-Next](https://huggingface.co/Qwen/Qwen3-Coder-Next)
+ using mlx-lm version **0.30.5**.
+
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("mlx-community/Qwen3-Coder-Next-8bit")
+
+ prompt = "hello"
+
+ if tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, add_generation_prompt=True, return_dict=False,
+     )
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
+ ```
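Beyond the Python API shown in the README, mlx-lm also installs a small command-line generator; a minimal sketch, assuming the standard `mlx_lm.generate` entry point shipped with the package (flag names may vary by version):

```bash
# One-off generation from the terminal
mlx_lm.generate --model mlx-community/Qwen3-Coder-Next-8bit \
  --prompt "Write a Python function that checks whether a string is a palindrome." \
  --max-tokens 256
```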
chat_template.jinja ADDED
@@ -0,0 +1,117 @@
+ {% macro render_extra_keys(json_dict, handled_keys) %}
+ {%- if json_dict is mapping %}
+ {%- for json_key in json_dict if json_key not in handled_keys %}
+ {%- if json_dict[json_key] is string %}
+ {{-'\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | string) ~ '</' ~ json_key ~ '>' }}
+ {%- else %}
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | tojson | safe) ~ '</' ~ json_key ~ '>' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- endmacro %}
+
+ {%- if messages[0]["role"] == "system" %}
+ {%- set system_message = messages[0]["content"] %}
+ {%- set loop_messages = messages[1:] %}
+ {%- else %}
+ {%- set loop_messages = messages %}
+ {%- endif %}
+
+ {%- if not tools is defined %}
+ {%- set tools = [] %}
+ {%- endif %}
+
+ {%- if system_message is defined %}
+ {{- "<|im_start|>system\n" + system_message }}
+ {%- else %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- "<|im_start|>system\nYou are Qwen, a helpful AI assistant that can interact with a computer to solve tasks." }}
+ {%- endif %}
+ {%- endif %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- "\n\n# Tools\n\nYou have access to the following functions:\n\n" }}
+ {{- "<tools>" }}
+ {%- for tool in tools %}
+ {%- if tool.function is defined %}
+ {%- set tool = tool.function %}
+ {%- endif %}
+ {{- "\n<function>\n<name>" ~ tool.name ~ "</name>" }}
+ {%- if tool.description is defined %}
+ {{- '\n<description>' ~ (tool.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {{- '\n<parameters>' }}
+ {%- if tool.parameters is defined and tool.parameters is mapping and tool.parameters.properties is defined and tool.parameters.properties is mapping %}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {{- '\n<parameter>' }}
+ {{- '\n<name>' ~ param_name ~ '</name>' }}
+ {%- if param_fields.type is defined %}
+ {{- '\n<type>' ~ (param_fields.type | string) ~ '</type>' }}
+ {%- endif %}
+ {%- if param_fields.description is defined %}
+ {{- '\n<description>' ~ (param_fields.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {%- set handled_keys = ['name', 'type', 'description'] %}
+ {{- render_extra_keys(param_fields, handled_keys) }}
+ {{- '\n</parameter>' }}
+ {%- endfor %}
+ {%- endif %}
+ {%- set handled_keys = ['type', 'properties'] %}
+ {{- render_extra_keys(tool.parameters, handled_keys) }}
+ {{- '\n</parameters>' }}
+ {%- set handled_keys = ['type', 'name', 'description', 'parameters'] %}
+ {{- render_extra_keys(tool, handled_keys) }}
+ {{- '\n</function>' }}
+ {%- endfor %}
+ {{- "\n</tools>" }}
+ {{- '\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>' }}
+ {%- endif %}
+ {%- if system_message is defined %}
+ {{- '<|im_end|>\n' }}
+ {%- else %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in loop_messages %}
+ {%- if message.role == "assistant" and message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
+ {{- '\n' + message.content | trim + '\n' }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n<function=' + tool_call.name + '>\n' }}
+ {%- if tool_call.arguments is defined %}
+ {%- for args_name, args_value in tool_call.arguments|items %}
+ {{- '<parameter=' + args_name + '>\n' }}
+ {%- set args_value = args_value if args_value is string else args_value | tojson | safe %}
+ {{- args_value }}
+ {{- '\n</parameter>\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '</function>\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "user" or message.role == "system" or message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>\n' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
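The template above renders tool schemas into a `<tools>` block in the system turn and expects the model to answer with `<tool_call><function=...>` XML. A usage sketch for seeing that rendering (the tool definition is invented for illustration; assumes the tokenizer's standard `apply_chat_template` with `tools` support):

```python
from mlx_lm import load

model, tokenizer = load("mlx-community/Qwen3-Coder-Next-8bit")

# Hypothetical tool schema, only to show how the template renders <function>/<parameter> tags
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}]

messages = [{"role": "user", "content": "What's the weather in Paris?"}]
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)  # shows the <tools>...</tools> system block produced by the template
```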
config.json ADDED
@@ -0,0 +1,825 @@
1
+ {
2
+ "architectures": [
3
+ "Qwen3NextForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0,
7
+ "bos_token_id": 151643,
8
+ "decoder_sparse_step": 1,
9
+ "eos_token_id": [
10
+ 151645,
11
+ 151643
12
+ ],
13
+ "full_attention_interval": 4,
14
+ "head_dim": 256,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 2048,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 5120,
19
+ "linear_conv_kernel_dim": 4,
20
+ "linear_key_head_dim": 128,
21
+ "linear_num_key_heads": 16,
22
+ "linear_num_value_heads": 32,
23
+ "linear_value_head_dim": 128,
24
+ "max_position_embeddings": 262144,
25
+ "mlp_only_layers": [],
26
+ "model_type": "qwen3_next",
27
+ "moe_intermediate_size": 512,
28
+ "norm_topk_prob": true,
29
+ "num_attention_heads": 16,
30
+ "num_experts": 512,
31
+ "num_experts_per_tok": 10,
32
+ "num_hidden_layers": 48,
33
+ "num_key_value_heads": 2,
34
+ "output_router_logits": false,
35
+ "partial_rotary_factor": 0.25,
36
+ "quantization": {
37
+ "group_size": 64,
38
+ "bits": 8,
39
+ "mode": "affine",
40
+ "model.layers.0.mlp.gate": {
41
+ "group_size": 64,
42
+ "bits": 8
43
+ },
44
+ "model.layers.0.mlp.shared_expert_gate": {
45
+ "group_size": 64,
46
+ "bits": 8
47
+ },
48
+ "model.layers.1.mlp.gate": {
49
+ "group_size": 64,
50
+ "bits": 8
51
+ },
52
+ "model.layers.1.mlp.shared_expert_gate": {
53
+ "group_size": 64,
54
+ "bits": 8
55
+ },
56
+ "model.layers.2.mlp.gate": {
57
+ "group_size": 64,
58
+ "bits": 8
59
+ },
60
+ "model.layers.2.mlp.shared_expert_gate": {
61
+ "group_size": 64,
62
+ "bits": 8
63
+ },
64
+ "model.layers.3.mlp.gate": {
65
+ "group_size": 64,
66
+ "bits": 8
67
+ },
68
+ "model.layers.3.mlp.shared_expert_gate": {
69
+ "group_size": 64,
70
+ "bits": 8
71
+ },
72
+ "model.layers.4.mlp.gate": {
73
+ "group_size": 64,
74
+ "bits": 8
75
+ },
76
+ "model.layers.4.mlp.shared_expert_gate": {
77
+ "group_size": 64,
78
+ "bits": 8
79
+ },
80
+ "model.layers.5.mlp.gate": {
81
+ "group_size": 64,
82
+ "bits": 8
83
+ },
84
+ "model.layers.5.mlp.shared_expert_gate": {
85
+ "group_size": 64,
86
+ "bits": 8
87
+ },
88
+ "model.layers.6.mlp.gate": {
89
+ "group_size": 64,
90
+ "bits": 8
91
+ },
92
+ "model.layers.6.mlp.shared_expert_gate": {
93
+ "group_size": 64,
94
+ "bits": 8
95
+ },
96
+ "model.layers.7.mlp.gate": {
97
+ "group_size": 64,
98
+ "bits": 8
99
+ },
100
+ "model.layers.7.mlp.shared_expert_gate": {
101
+ "group_size": 64,
102
+ "bits": 8
103
+ },
104
+ "model.layers.8.mlp.gate": {
105
+ "group_size": 64,
106
+ "bits": 8
107
+ },
108
+ "model.layers.8.mlp.shared_expert_gate": {
109
+ "group_size": 64,
110
+ "bits": 8
111
+ },
112
+ "model.layers.9.mlp.gate": {
113
+ "group_size": 64,
114
+ "bits": 8
115
+ },
116
+ "model.layers.9.mlp.shared_expert_gate": {
117
+ "group_size": 64,
118
+ "bits": 8
119
+ },
120
+ "model.layers.10.mlp.gate": {
121
+ "group_size": 64,
122
+ "bits": 8
123
+ },
124
+ "model.layers.10.mlp.shared_expert_gate": {
125
+ "group_size": 64,
126
+ "bits": 8
127
+ },
128
+ "model.layers.11.mlp.gate": {
129
+ "group_size": 64,
130
+ "bits": 8
131
+ },
132
+ "model.layers.11.mlp.shared_expert_gate": {
133
+ "group_size": 64,
134
+ "bits": 8
135
+ },
136
+ "model.layers.12.mlp.gate": {
137
+ "group_size": 64,
138
+ "bits": 8
139
+ },
140
+ "model.layers.12.mlp.shared_expert_gate": {
141
+ "group_size": 64,
142
+ "bits": 8
143
+ },
144
+ "model.layers.13.mlp.gate": {
145
+ "group_size": 64,
146
+ "bits": 8
147
+ },
148
+ "model.layers.13.mlp.shared_expert_gate": {
149
+ "group_size": 64,
150
+ "bits": 8
151
+ },
152
+ "model.layers.14.mlp.gate": {
153
+ "group_size": 64,
154
+ "bits": 8
155
+ },
156
+ "model.layers.14.mlp.shared_expert_gate": {
157
+ "group_size": 64,
158
+ "bits": 8
159
+ },
160
+ "model.layers.15.mlp.gate": {
161
+ "group_size": 64,
162
+ "bits": 8
163
+ },
164
+ "model.layers.15.mlp.shared_expert_gate": {
165
+ "group_size": 64,
166
+ "bits": 8
167
+ },
168
+ "model.layers.16.mlp.gate": {
169
+ "group_size": 64,
170
+ "bits": 8
171
+ },
172
+ "model.layers.16.mlp.shared_expert_gate": {
173
+ "group_size": 64,
174
+ "bits": 8
175
+ },
176
+ "model.layers.17.mlp.gate": {
177
+ "group_size": 64,
178
+ "bits": 8
179
+ },
180
+ "model.layers.17.mlp.shared_expert_gate": {
181
+ "group_size": 64,
182
+ "bits": 8
183
+ },
184
+ "model.layers.18.mlp.gate": {
185
+ "group_size": 64,
186
+ "bits": 8
187
+ },
188
+ "model.layers.18.mlp.shared_expert_gate": {
189
+ "group_size": 64,
190
+ "bits": 8
191
+ },
192
+ "model.layers.19.mlp.gate": {
193
+ "group_size": 64,
194
+ "bits": 8
195
+ },
196
+ "model.layers.19.mlp.shared_expert_gate": {
197
+ "group_size": 64,
198
+ "bits": 8
199
+ },
200
+ "model.layers.20.mlp.gate": {
201
+ "group_size": 64,
202
+ "bits": 8
203
+ },
204
+ "model.layers.20.mlp.shared_expert_gate": {
205
+ "group_size": 64,
206
+ "bits": 8
207
+ },
208
+ "model.layers.21.mlp.gate": {
209
+ "group_size": 64,
210
+ "bits": 8
211
+ },
212
+ "model.layers.21.mlp.shared_expert_gate": {
213
+ "group_size": 64,
214
+ "bits": 8
215
+ },
216
+ "model.layers.22.mlp.gate": {
217
+ "group_size": 64,
218
+ "bits": 8
219
+ },
220
+ "model.layers.22.mlp.shared_expert_gate": {
221
+ "group_size": 64,
222
+ "bits": 8
223
+ },
224
+ "model.layers.23.mlp.gate": {
225
+ "group_size": 64,
226
+ "bits": 8
227
+ },
228
+ "model.layers.23.mlp.shared_expert_gate": {
229
+ "group_size": 64,
230
+ "bits": 8
231
+ },
232
+ "model.layers.24.mlp.gate": {
233
+ "group_size": 64,
234
+ "bits": 8
235
+ },
236
+ "model.layers.24.mlp.shared_expert_gate": {
237
+ "group_size": 64,
238
+ "bits": 8
239
+ },
240
+ "model.layers.25.mlp.gate": {
241
+ "group_size": 64,
242
+ "bits": 8
243
+ },
244
+ "model.layers.25.mlp.shared_expert_gate": {
245
+ "group_size": 64,
246
+ "bits": 8
247
+ },
248
+ "model.layers.26.mlp.gate": {
249
+ "group_size": 64,
250
+ "bits": 8
251
+ },
252
+ "model.layers.26.mlp.shared_expert_gate": {
253
+ "group_size": 64,
254
+ "bits": 8
255
+ },
256
+ "model.layers.27.mlp.gate": {
257
+ "group_size": 64,
258
+ "bits": 8
259
+ },
260
+ "model.layers.27.mlp.shared_expert_gate": {
261
+ "group_size": 64,
262
+ "bits": 8
263
+ },
264
+ "model.layers.28.mlp.gate": {
265
+ "group_size": 64,
266
+ "bits": 8
267
+ },
268
+ "model.layers.28.mlp.shared_expert_gate": {
269
+ "group_size": 64,
270
+ "bits": 8
271
+ },
272
+ "model.layers.29.mlp.gate": {
273
+ "group_size": 64,
274
+ "bits": 8
275
+ },
276
+ "model.layers.29.mlp.shared_expert_gate": {
277
+ "group_size": 64,
278
+ "bits": 8
279
+ },
280
+ "model.layers.30.mlp.gate": {
281
+ "group_size": 64,
282
+ "bits": 8
283
+ },
284
+ "model.layers.30.mlp.shared_expert_gate": {
285
+ "group_size": 64,
286
+ "bits": 8
287
+ },
288
+ "model.layers.31.mlp.gate": {
289
+ "group_size": 64,
290
+ "bits": 8
291
+ },
292
+ "model.layers.31.mlp.shared_expert_gate": {
293
+ "group_size": 64,
294
+ "bits": 8
295
+ },
296
+ "model.layers.32.mlp.gate": {
297
+ "group_size": 64,
298
+ "bits": 8
299
+ },
300
+ "model.layers.32.mlp.shared_expert_gate": {
301
+ "group_size": 64,
302
+ "bits": 8
303
+ },
304
+ "model.layers.33.mlp.gate": {
305
+ "group_size": 64,
306
+ "bits": 8
307
+ },
308
+ "model.layers.33.mlp.shared_expert_gate": {
309
+ "group_size": 64,
310
+ "bits": 8
311
+ },
312
+ "model.layers.34.mlp.gate": {
313
+ "group_size": 64,
314
+ "bits": 8
315
+ },
316
+ "model.layers.34.mlp.shared_expert_gate": {
317
+ "group_size": 64,
318
+ "bits": 8
319
+ },
320
+ "model.layers.35.mlp.gate": {
321
+ "group_size": 64,
322
+ "bits": 8
323
+ },
324
+ "model.layers.35.mlp.shared_expert_gate": {
325
+ "group_size": 64,
326
+ "bits": 8
327
+ },
328
+ "model.layers.36.mlp.gate": {
329
+ "group_size": 64,
330
+ "bits": 8
331
+ },
332
+ "model.layers.36.mlp.shared_expert_gate": {
333
+ "group_size": 64,
334
+ "bits": 8
335
+ },
336
+ "model.layers.37.mlp.gate": {
337
+ "group_size": 64,
338
+ "bits": 8
339
+ },
340
+ "model.layers.37.mlp.shared_expert_gate": {
341
+ "group_size": 64,
342
+ "bits": 8
343
+ },
344
+ "model.layers.38.mlp.gate": {
345
+ "group_size": 64,
346
+ "bits": 8
347
+ },
348
+ "model.layers.38.mlp.shared_expert_gate": {
349
+ "group_size": 64,
350
+ "bits": 8
351
+ },
352
+ "model.layers.39.mlp.gate": {
353
+ "group_size": 64,
354
+ "bits": 8
355
+ },
356
+ "model.layers.39.mlp.shared_expert_gate": {
357
+ "group_size": 64,
358
+ "bits": 8
359
+ },
360
+ "model.layers.40.mlp.gate": {
361
+ "group_size": 64,
362
+ "bits": 8
363
+ },
364
+ "model.layers.40.mlp.shared_expert_gate": {
365
+ "group_size": 64,
366
+ "bits": 8
367
+ },
368
+ "model.layers.41.mlp.gate": {
369
+ "group_size": 64,
370
+ "bits": 8
371
+ },
372
+ "model.layers.41.mlp.shared_expert_gate": {
373
+ "group_size": 64,
374
+ "bits": 8
375
+ },
376
+ "model.layers.42.mlp.gate": {
377
+ "group_size": 64,
378
+ "bits": 8
379
+ },
380
+ "model.layers.42.mlp.shared_expert_gate": {
381
+ "group_size": 64,
382
+ "bits": 8
383
+ },
384
+ "model.layers.43.mlp.gate": {
385
+ "group_size": 64,
386
+ "bits": 8
387
+ },
388
+ "model.layers.43.mlp.shared_expert_gate": {
389
+ "group_size": 64,
390
+ "bits": 8
391
+ },
392
+ "model.layers.44.mlp.gate": {
393
+ "group_size": 64,
394
+ "bits": 8
395
+ },
396
+ "model.layers.44.mlp.shared_expert_gate": {
397
+ "group_size": 64,
398
+ "bits": 8
399
+ },
400
+ "model.layers.45.mlp.gate": {
401
+ "group_size": 64,
402
+ "bits": 8
403
+ },
404
+ "model.layers.45.mlp.shared_expert_gate": {
405
+ "group_size": 64,
406
+ "bits": 8
407
+ },
408
+ "model.layers.46.mlp.gate": {
409
+ "group_size": 64,
410
+ "bits": 8
411
+ },
412
+ "model.layers.46.mlp.shared_expert_gate": {
413
+ "group_size": 64,
414
+ "bits": 8
415
+ },
416
+ "model.layers.47.mlp.gate": {
417
+ "group_size": 64,
418
+ "bits": 8
419
+ },
420
+ "model.layers.47.mlp.shared_expert_gate": {
421
+ "group_size": 64,
422
+ "bits": 8
423
+ }
424
+ },
425
+ "quantization_config": {
426
+ "group_size": 64,
427
+ "bits": 8,
428
+ "mode": "affine",
429
+ "model.layers.0.mlp.gate": {
430
+ "group_size": 64,
431
+ "bits": 8
432
+ },
433
+ "model.layers.0.mlp.shared_expert_gate": {
434
+ "group_size": 64,
435
+ "bits": 8
436
+ },
437
+ "model.layers.1.mlp.gate": {
438
+ "group_size": 64,
439
+ "bits": 8
440
+ },
441
+ "model.layers.1.mlp.shared_expert_gate": {
442
+ "group_size": 64,
443
+ "bits": 8
444
+ },
445
+ "model.layers.2.mlp.gate": {
446
+ "group_size": 64,
447
+ "bits": 8
448
+ },
449
+ "model.layers.2.mlp.shared_expert_gate": {
450
+ "group_size": 64,
451
+ "bits": 8
452
+ },
453
+ "model.layers.3.mlp.gate": {
454
+ "group_size": 64,
455
+ "bits": 8
456
+ },
457
+ "model.layers.3.mlp.shared_expert_gate": {
458
+ "group_size": 64,
459
+ "bits": 8
460
+ },
461
+ "model.layers.4.mlp.gate": {
462
+ "group_size": 64,
463
+ "bits": 8
464
+ },
465
+ "model.layers.4.mlp.shared_expert_gate": {
466
+ "group_size": 64,
467
+ "bits": 8
468
+ },
469
+ "model.layers.5.mlp.gate": {
470
+ "group_size": 64,
471
+ "bits": 8
472
+ },
473
+ "model.layers.5.mlp.shared_expert_gate": {
474
+ "group_size": 64,
475
+ "bits": 8
476
+ },
477
+ "model.layers.6.mlp.gate": {
478
+ "group_size": 64,
479
+ "bits": 8
480
+ },
481
+ "model.layers.6.mlp.shared_expert_gate": {
482
+ "group_size": 64,
483
+ "bits": 8
484
+ },
485
+ "model.layers.7.mlp.gate": {
486
+ "group_size": 64,
487
+ "bits": 8
488
+ },
489
+ "model.layers.7.mlp.shared_expert_gate": {
490
+ "group_size": 64,
491
+ "bits": 8
492
+ },
493
+ "model.layers.8.mlp.gate": {
494
+ "group_size": 64,
495
+ "bits": 8
496
+ },
497
+ "model.layers.8.mlp.shared_expert_gate": {
498
+ "group_size": 64,
499
+ "bits": 8
500
+ },
501
+ "model.layers.9.mlp.gate": {
502
+ "group_size": 64,
503
+ "bits": 8
504
+ },
505
+ "model.layers.9.mlp.shared_expert_gate": {
506
+ "group_size": 64,
507
+ "bits": 8
508
+ },
509
+ "model.layers.10.mlp.gate": {
510
+ "group_size": 64,
511
+ "bits": 8
512
+ },
513
+ "model.layers.10.mlp.shared_expert_gate": {
514
+ "group_size": 64,
515
+ "bits": 8
516
+ },
517
+ "model.layers.11.mlp.gate": {
518
+ "group_size": 64,
519
+ "bits": 8
520
+ },
521
+ "model.layers.11.mlp.shared_expert_gate": {
522
+ "group_size": 64,
523
+ "bits": 8
524
+ },
525
+ "model.layers.12.mlp.gate": {
526
+ "group_size": 64,
527
+ "bits": 8
528
+ },
529
+ "model.layers.12.mlp.shared_expert_gate": {
530
+ "group_size": 64,
531
+ "bits": 8
532
+ },
533
+ "model.layers.13.mlp.gate": {
534
+ "group_size": 64,
535
+ "bits": 8
536
+ },
537
+ "model.layers.13.mlp.shared_expert_gate": {
538
+ "group_size": 64,
539
+ "bits": 8
540
+ },
541
+ "model.layers.14.mlp.gate": {
542
+ "group_size": 64,
543
+ "bits": 8
544
+ },
545
+ "model.layers.14.mlp.shared_expert_gate": {
546
+ "group_size": 64,
547
+ "bits": 8
548
+ },
549
+ "model.layers.15.mlp.gate": {
550
+ "group_size": 64,
551
+ "bits": 8
552
+ },
553
+ "model.layers.15.mlp.shared_expert_gate": {
554
+ "group_size": 64,
555
+ "bits": 8
556
+ },
557
+ "model.layers.16.mlp.gate": {
558
+ "group_size": 64,
559
+ "bits": 8
560
+ },
561
+ "model.layers.16.mlp.shared_expert_gate": {
562
+ "group_size": 64,
563
+ "bits": 8
564
+ },
565
+ "model.layers.17.mlp.gate": {
566
+ "group_size": 64,
567
+ "bits": 8
568
+ },
569
+ "model.layers.17.mlp.shared_expert_gate": {
570
+ "group_size": 64,
571
+ "bits": 8
572
+ },
573
+ "model.layers.18.mlp.gate": {
574
+ "group_size": 64,
575
+ "bits": 8
576
+ },
577
+ "model.layers.18.mlp.shared_expert_gate": {
578
+ "group_size": 64,
579
+ "bits": 8
580
+ },
581
+ "model.layers.19.mlp.gate": {
582
+ "group_size": 64,
583
+ "bits": 8
584
+ },
585
+ "model.layers.19.mlp.shared_expert_gate": {
586
+ "group_size": 64,
587
+ "bits": 8
588
+ },
589
+ "model.layers.20.mlp.gate": {
590
+ "group_size": 64,
591
+ "bits": 8
592
+ },
593
+ "model.layers.20.mlp.shared_expert_gate": {
594
+ "group_size": 64,
595
+ "bits": 8
596
+ },
597
+ "model.layers.21.mlp.gate": {
598
+ "group_size": 64,
599
+ "bits": 8
600
+ },
601
+ "model.layers.21.mlp.shared_expert_gate": {
602
+ "group_size": 64,
603
+ "bits": 8
604
+ },
605
+ "model.layers.22.mlp.gate": {
606
+ "group_size": 64,
607
+ "bits": 8
608
+ },
609
+ "model.layers.22.mlp.shared_expert_gate": {
610
+ "group_size": 64,
611
+ "bits": 8
612
+ },
613
+ "model.layers.23.mlp.gate": {
614
+ "group_size": 64,
615
+ "bits": 8
616
+ },
617
+ "model.layers.23.mlp.shared_expert_gate": {
618
+ "group_size": 64,
619
+ "bits": 8
620
+ },
621
+ "model.layers.24.mlp.gate": {
622
+ "group_size": 64,
623
+ "bits": 8
624
+ },
625
+ "model.layers.24.mlp.shared_expert_gate": {
626
+ "group_size": 64,
627
+ "bits": 8
628
+ },
629
+ "model.layers.25.mlp.gate": {
630
+ "group_size": 64,
631
+ "bits": 8
632
+ },
633
+ "model.layers.25.mlp.shared_expert_gate": {
634
+ "group_size": 64,
635
+ "bits": 8
636
+ },
637
+ "model.layers.26.mlp.gate": {
638
+ "group_size": 64,
639
+ "bits": 8
640
+ },
641
+ "model.layers.26.mlp.shared_expert_gate": {
642
+ "group_size": 64,
643
+ "bits": 8
644
+ },
645
+ "model.layers.27.mlp.gate": {
646
+ "group_size": 64,
647
+ "bits": 8
648
+ },
649
+ "model.layers.27.mlp.shared_expert_gate": {
650
+ "group_size": 64,
651
+ "bits": 8
652
+ },
653
+ "model.layers.28.mlp.gate": {
654
+ "group_size": 64,
655
+ "bits": 8
656
+ },
657
+ "model.layers.28.mlp.shared_expert_gate": {
658
+ "group_size": 64,
659
+ "bits": 8
660
+ },
661
+ "model.layers.29.mlp.gate": {
662
+ "group_size": 64,
663
+ "bits": 8
664
+ },
665
+ "model.layers.29.mlp.shared_expert_gate": {
666
+ "group_size": 64,
667
+ "bits": 8
668
+ },
669
+ "model.layers.30.mlp.gate": {
670
+ "group_size": 64,
671
+ "bits": 8
672
+ },
673
+ "model.layers.30.mlp.shared_expert_gate": {
674
+ "group_size": 64,
675
+ "bits": 8
676
+ },
677
+ "model.layers.31.mlp.gate": {
678
+ "group_size": 64,
679
+ "bits": 8
680
+ },
681
+ "model.layers.31.mlp.shared_expert_gate": {
682
+ "group_size": 64,
683
+ "bits": 8
684
+ },
685
+ "model.layers.32.mlp.gate": {
686
+ "group_size": 64,
687
+ "bits": 8
688
+ },
689
+ "model.layers.32.mlp.shared_expert_gate": {
690
+ "group_size": 64,
691
+ "bits": 8
692
+ },
693
+ "model.layers.33.mlp.gate": {
694
+ "group_size": 64,
695
+ "bits": 8
696
+ },
697
+ "model.layers.33.mlp.shared_expert_gate": {
698
+ "group_size": 64,
699
+ "bits": 8
700
+ },
701
+ "model.layers.34.mlp.gate": {
702
+ "group_size": 64,
703
+ "bits": 8
704
+ },
705
+ "model.layers.34.mlp.shared_expert_gate": {
706
+ "group_size": 64,
707
+ "bits": 8
708
+ },
709
+ "model.layers.35.mlp.gate": {
710
+ "group_size": 64,
711
+ "bits": 8
712
+ },
713
+ "model.layers.35.mlp.shared_expert_gate": {
714
+ "group_size": 64,
715
+ "bits": 8
716
+ },
717
+ "model.layers.36.mlp.gate": {
718
+ "group_size": 64,
719
+ "bits": 8
720
+ },
721
+ "model.layers.36.mlp.shared_expert_gate": {
722
+ "group_size": 64,
723
+ "bits": 8
724
+ },
725
+ "model.layers.37.mlp.gate": {
726
+ "group_size": 64,
727
+ "bits": 8
728
+ },
729
+ "model.layers.37.mlp.shared_expert_gate": {
730
+ "group_size": 64,
731
+ "bits": 8
732
+ },
733
+ "model.layers.38.mlp.gate": {
734
+ "group_size": 64,
735
+ "bits": 8
736
+ },
737
+ "model.layers.38.mlp.shared_expert_gate": {
738
+ "group_size": 64,
739
+ "bits": 8
740
+ },
741
+ "model.layers.39.mlp.gate": {
742
+ "group_size": 64,
743
+ "bits": 8
744
+ },
745
+ "model.layers.39.mlp.shared_expert_gate": {
746
+ "group_size": 64,
747
+ "bits": 8
748
+ },
749
+ "model.layers.40.mlp.gate": {
750
+ "group_size": 64,
751
+ "bits": 8
752
+ },
753
+ "model.layers.40.mlp.shared_expert_gate": {
754
+ "group_size": 64,
755
+ "bits": 8
756
+ },
757
+ "model.layers.41.mlp.gate": {
758
+ "group_size": 64,
759
+ "bits": 8
760
+ },
761
+ "model.layers.41.mlp.shared_expert_gate": {
762
+ "group_size": 64,
763
+ "bits": 8
764
+ },
765
+ "model.layers.42.mlp.gate": {
766
+ "group_size": 64,
767
+ "bits": 8
768
+ },
769
+ "model.layers.42.mlp.shared_expert_gate": {
770
+ "group_size": 64,
771
+ "bits": 8
772
+ },
773
+ "model.layers.43.mlp.gate": {
774
+ "group_size": 64,
775
+ "bits": 8
776
+ },
777
+ "model.layers.43.mlp.shared_expert_gate": {
778
+ "group_size": 64,
779
+ "bits": 8
780
+ },
781
+ "model.layers.44.mlp.gate": {
782
+ "group_size": 64,
783
+ "bits": 8
784
+ },
785
+ "model.layers.44.mlp.shared_expert_gate": {
786
+ "group_size": 64,
787
+ "bits": 8
788
+ },
789
+ "model.layers.45.mlp.gate": {
790
+ "group_size": 64,
791
+ "bits": 8
792
+ },
793
+ "model.layers.45.mlp.shared_expert_gate": {
794
+ "group_size": 64,
795
+ "bits": 8
796
+ },
797
+ "model.layers.46.mlp.gate": {
798
+ "group_size": 64,
799
+ "bits": 8
800
+ },
801
+ "model.layers.46.mlp.shared_expert_gate": {
802
+ "group_size": 64,
803
+ "bits": 8
804
+ },
805
+ "model.layers.47.mlp.gate": {
806
+ "group_size": 64,
807
+ "bits": 8
808
+ },
809
+ "model.layers.47.mlp.shared_expert_gate": {
810
+ "group_size": 64,
811
+ "bits": 8
812
+ }
813
+ },
814
+ "rms_norm_eps": 1e-06,
815
+ "rope_scaling": null,
816
+ "rope_theta": 5000000,
817
+ "router_aux_loss_coef": 0.001,
818
+ "shared_expert_intermediate_size": 512,
819
+ "tie_word_embeddings": false,
820
+ "torch_dtype": "bfloat16",
821
+ "transformers_version": "4.57.0.dev0",
822
+ "use_cache": true,
823
+ "use_sliding_window": false,
824
+ "vocab_size": 151936
825
+ }
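All of the per-layer `quantization` overrides above use the same settings as the global default: 8-bit affine quantization with group size 64 (the MoE router gates and shared-expert gates included). A small sketch for inspecting these values from a downloaded copy of this config (the local path is an assumption):

```python
import json

# Assumed local path; point it at the downloaded config.json
with open("config.json") as f:
    cfg = json.load(f)

q = cfg["quantization"]
print(q["bits"], q["group_size"], q["mode"])            # 8 64 affine
print(cfg["num_experts"], cfg["num_experts_per_tok"])   # 512 10
```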
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 1.0,
+   "top_k": 40,
+   "top_p": 0.95,
+   "transformers_version": "4.57.3"
+ }
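These sampling defaults (temperature 1.0, top-p 0.95, top-k 40) can be mirrored in mlx-lm by passing a sampler to `generate`; a sketch assuming the `make_sampler` helper from `mlx_lm.sample_utils` (argument names may differ across mlx-lm versions):

```python
from mlx_lm import load, generate
from mlx_lm.sample_utils import make_sampler  # assumed helper; check your mlx-lm version

model, tokenizer = load("mlx-community/Qwen3-Coder-Next-8bit")
sampler = make_sampler(temp=1.0, top_p=0.95, top_k=40)  # mirrors generation_config.json
response = generate(model, tokenizer, prompt="hello", sampler=sampler, verbose=True)
```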
model-00001-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f9ab10da8272e7d6d4c53d916f81706b6c4b6d36dbaa3cb603030dbedb622fa
3
+ size 5011654227
model-00002-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4418df9563555bcf5413e2e38ad235207cc98735143d02c3e6bd7e9a2f04cc06
3
+ size 5247924085
model-00003-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:433402b7ea8090a381c383d5e8bf52764642decbdaeff24ac68bd02a2e1c7eba
3
+ size 5247924143
model-00004-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fbc043e27f738ad0ea3d658777c5bf1db4a5e77c557f14492aeb934cabca953
3
+ size 5247924165
model-00005-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:894b5d4a524aaa46712fa643f1eb9d239e66746b97594adbf14c3f86124e9fc3
3
+ size 5254812915
model-00006-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e43a2dda889ac414ca014d69f123f21c42296db99d7e1b5a565a893ded0fbd4
3
+ size 5247924265
model-00007-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35e134e18585bd79f83896a7abe76ac0b648aaa162cd27abc9d9ca371cdf73f4
3
+ size 5247924259
model-00008-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37dacd3dd070981c4f093b6cac7cd101bf262967eeab160c371c9c3779a101e2
3
+ size 5247924127
model-00009-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ab8b11bbb3c7ef28bf4d7cc8ccc764fb7ed09883bc70ec142ad37145c885b9c
3
+ size 5254813027
model-00010-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddd529dba1335d18ae4f644badc95c1053e5f172628cb62ea688daa7b35bc7ae
3
+ size 5247924201
model-00011-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eff97b420def52688f9c3691dff837d1f7964904bcb6042de8c9a2656849b5b8
3
+ size 5247924259
model-00012-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0765861dbd3d79b5066e571d671563bd3923374985dec157ccdd0959f3a4298
3
+ size 5247924211
model-00013-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d24f5ec419d52dfa876eba312678a4fc5facbda15eec4c97f2a4a5decfe7dc6
3
+ size 5254812943
model-00014-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8dc787e5fd81ad417f5f3ec2ee20ef278df0d71e1e5b0e135ee89f41a51585bd
3
+ size 5247924235
model-00015-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51fecbc28d25994e03eb56621ea68f1e6a7d47bb39c65c25c9b8d75122e5306b
3
+ size 5247924259
model-00016-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:731aab1bb4f29e0d857c5fa6fa35c2d933adf7d17df9f8e79199f4c8298adbad
3
+ size 5247924143
model-00017-of-00017.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1045fc969ea62487e2f6169594c3ede78025e18d30bfe6cdadb28b096f73839
3
+ size 904388913
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen3_coder_detector_sgl.py ADDED
@@ -0,0 +1,474 @@
+ import ast
+ import json
+ import logging
+ import re
+ from typing import Any, List, Optional
+
+ from sglang.srt.entrypoints.openai.protocol import Tool
+ from sglang.srt.function_call.base_format_detector import BaseFormatDetector
+ from sglang.srt.function_call.core_types import (
+     StreamingParseResult,
+     ToolCallItem,
+     _GetInfoFunc,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class Qwen3CoderDetector(BaseFormatDetector):
+     def __init__(self):
+         super().__init__()
+
+         # Sentinel tokens
+         self.tool_call_start_token: str = "<tool_call>"
+         self.tool_call_end_token: str = "</tool_call>"
+         self.tool_call_prefix: str = "<function="
+         self.function_end_token: str = "</function>"
+         self.parameter_prefix: str = "<parameter="
+         self.parameter_end_token: str = "</parameter>"
+
+         # Regex for non-streaming fallback
+         self.tool_call_regex = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)
+         self.tool_call_function_regex = re.compile(
+             r"<function=(.*?)</function>|<function=(.*)$", re.DOTALL
+         )
+         self.tool_call_parameter_regex = re.compile(
+             r"<parameter=(.*?)(?:</parameter>|(?=<parameter=)|(?=</function>)|$)",
+             re.DOTALL,
+         )
+
+         # Streaming state
+         # The base class already initializes _buffer, so we use it directly;
+         # no hasattr check is needed because we control the lifecycle through inheritance.
+
+         # Index pointing to the next character to be processed in the buffer
+         self.parsed_pos: int = 0
+         # Parameter count inside the current tool being processed, used to decide whether to add a comma
+         self.current_tool_param_count: int = 0
+         # Flag indicating whether the current tool has already emitted '{'
+         self.json_started: bool = False
+
+         # [FIX] New state flag: marks whether we are inside a tool_call block
+         self.is_inside_tool_call: bool = False
+
+         # Initialize attributes that were missing in the original PR
+         self.current_func_name: Optional[str] = None
+
+     def has_tool_call(self, text: str) -> bool:
+         return self.tool_call_start_token in text
+
+     def _get_arguments_config(
+         self, func_name: str, tools: Optional[list[Tool]]
+     ) -> dict:
+         """Extract argument configuration for a function."""
+         if tools is None:
+             return {}
+         for config in tools:
+             try:
+                 config_type = config.type
+                 config_function = config.function
+                 config_function_name = config_function.name
+             except AttributeError:
+                 continue
+
+             if config_type == "function" and config_function_name == func_name:
+                 try:
+                     params = config_function.parameters
+                 except AttributeError:
+                     return {}
+
+                 if isinstance(params, dict) and "properties" in params:
+                     return params["properties"]
+                 elif isinstance(params, dict):
+                     return params
+                 else:
+                     return {}
+         logger.warning(f"Tool '{func_name}' is not defined in the tools list.")
+         return {}
+
+     def _convert_param_value(
+         self, param_value: str, param_name: str, param_config: dict, func_name: str
+     ) -> Any:
+         """Convert parameter value based on its type in the schema."""
+         # Handle null value for any type
+         if param_value.lower() == "null":
+             return None
+
+         if param_name not in param_config:
+             if param_config != {}:
+                 logger.warning(
+                     f"Parsed parameter '{param_name}' is not defined in the tool "
+                     f"parameters for tool '{func_name}', directly returning the string value."
+                 )
+             return param_value
+
+         if (
+             isinstance(param_config[param_name], dict)
+             and "type" in param_config[param_name]
+         ):
+             param_type = str(param_config[param_name]["type"]).strip().lower()
+         else:
+             param_type = "string"
+         if param_type in ["string", "str", "text", "varchar", "char", "enum"]:
+             return param_value
+         elif (
+             param_type.startswith("int")
+             or param_type.startswith("uint")
+             or param_type.startswith("long")
+             or param_type.startswith("short")
+             or param_type.startswith("unsigned")
+         ):
+             try:
+                 param_value = int(param_value)
+             except Exception:
+                 logger.warning(
+                     f"Parsed value '{param_value}' of parameter '{param_name}' is not an integer in tool "
+                     f"'{func_name}', degenerating to string."
+                 )
+             return param_value
+         elif param_type.startswith("num") or param_type.startswith("float"):
+             try:
+                 maybe_convert = (
+                     False if "." in param_value or "e" in param_value.lower() else True
+                 )
+                 param_value: float = float(param_value)
+                 if maybe_convert and param_value.is_integer():
+                     param_value = int(param_value)
+             except Exception:
+                 logger.warning(
+                     f"Parsed value '{param_value}' of parameter '{param_name}' is not a float in tool "
+                     f"'{func_name}', degenerating to string."
+                 )
+             return param_value
+         elif param_type in ["boolean", "bool", "binary"]:
+             param_value = param_value.lower()
+             if param_value not in ["true", "false"]:
+                 logger.warning(
+                     f"Parsed value '{param_value}' of parameter '{param_name}' is not a boolean (`true` or `false`) in tool '{func_name}', degenerating to false."
+                 )
+             return param_value == "true"
+         else:
+             if (
+                 param_type in ["object", "array", "arr"]
+                 or param_type.startswith("dict")
+                 or param_type.startswith("list")
+             ):
+                 try:
+                     param_value = json.loads(param_value)
+                     return param_value
+                 except Exception:
+                     logger.warning(
+                         f"Parsed value '{param_value}' of parameter '{param_name}' cannot be parsed with json.loads in tool "
+                         f"'{func_name}', will try other methods to parse it."
+                     )
+             try:
+                 param_value = ast.literal_eval(param_value)  # safer
+             except Exception:
+                 logger.warning(
+                     f"Parsed value '{param_value}' of parameter '{param_name}' cannot be converted via Python `ast.literal_eval()` in tool '{func_name}', degenerating to string."
+                 )
+             return param_value
+
+     def detect_and_parse(self, text: str, tools: List[Tool]) -> StreamingParseResult:
+         """One-shot parsing for non-streaming scenarios."""
+         if self.tool_call_start_token not in text:
+             return StreamingParseResult(normal_text=text)
+
+         calls = []
+         try:
+             # Simple cleanup of the text to find tool calls
+             # Note: this is a simplified regex approach consistent with vLLM
+             raw_tool_calls = self.tool_call_regex.findall(text)
+             if not raw_tool_calls:
+                 # Fallback: maybe the whole text is inside the tag or tags are stripped
+                 if self.tool_call_prefix in text:
+                     raw_tool_calls = [text]
+
+             tool_idx = 0
+             for tool_content in raw_tool_calls:
+                 # Find function calls
+                 funcs = self.tool_call_function_regex.findall(tool_content)
+                 for func_match in funcs:
+                     func_body = func_match[0] or func_match[1]
+                     if ">" not in func_body:
+                         continue
+
+                     name_end = func_body.index(">")
+                     func_name = func_body[:name_end]
+                     params_str = func_body[name_end + 1 :]
+
+                     param_config = self._get_arguments_config(func_name, tools)
+                     parsed_params = {}
+
+                     for p_match in self.tool_call_parameter_regex.findall(params_str):
+                         if ">" not in p_match:
+                             continue
+                         p_idx = p_match.index(">")
+                         p_name = p_match[:p_idx]
+                         p_val = p_match[p_idx + 1 :]
+                         # Remove leading and trailing \n
+                         if p_val.startswith("\n"):
+                             p_val = p_val[1:]
+                         if p_val.endswith("\n"):
+                             p_val = p_val[:-1]
+
+                         parsed_params[p_name] = self._convert_param_value(
+                             p_val, p_name, param_config, func_name
+                         )
+
+                     calls.append(
+                         ToolCallItem(
+                             tool_index=tool_idx,
+                             name=func_name,
+                             parameters=json.dumps(parsed_params, ensure_ascii=False),
+                         )
+                     )
+                     tool_idx += 1
+
+             # Determine normal text (text before the first tool call)
+             start_idx = text.find(self.tool_call_start_token)
+             if start_idx == -1:
+                 start_idx = text.find(self.tool_call_prefix)
+             normal_text = text[:start_idx] if start_idx > 0 else ""
+
+             return StreamingParseResult(normal_text=normal_text, calls=calls)
+
+         except Exception as e:
+             logger.error(f"Error in detect_and_parse: {e}")
+             return StreamingParseResult(normal_text=text)
+
+     def parse_streaming_increment(
+         self, new_text: str, tools: List[Tool]
+     ) -> StreamingParseResult:
+         """
+         Robust cursor-based streaming parser.
+         """
+         self._buffer += new_text
+
+         # Guard against empty buffer
+         if not self._buffer:
+             return StreamingParseResult()
+
+         calls = []
+         normal_text_chunks = []
+
+         while True:
+             # Working text slice
+             current_slice = self._buffer[self.parsed_pos :]
+
+             # Optimization: if almost empty, wait for more
+             if not current_slice:
+                 break
+
+             # -------------------------------------------------------
+             # 1. Priority detection: check if it's the start of a tool call
+             # -------------------------------------------------------
+             if current_slice.startswith(self.tool_call_start_token):
+                 self.parsed_pos += len(self.tool_call_start_token)
+                 self.is_inside_tool_call = True
+                 continue
+
+             # -------------------------------------------------------
+             # 2. Function name: <function=name>
+             # -------------------------------------------------------
+             if current_slice.startswith(self.tool_call_prefix):
+                 end_angle = current_slice.find(">")
+                 if end_angle != -1:
+                     func_name = current_slice[len(self.tool_call_prefix) : end_angle]
+
+                     self.current_tool_id += 1
+                     self.current_tool_name_sent = True
+                     self.current_tool_param_count = 0
+                     self.json_started = False
+                     self.current_func_name = func_name
+
+                     calls.append(
+                         ToolCallItem(
+                             tool_index=self.current_tool_id,
+                             name=func_name,
+                             parameters="",
+                         )
+                     )
+
+                     self.parsed_pos += end_angle + 1
+                     continue
+                 else:
+                     # Incomplete tag
+                     break
+
+             # -------------------------------------------------------
+             # 3. Parameter: <parameter=name>value...
+             # -------------------------------------------------------
+             if current_slice.startswith(self.parameter_prefix):
+                 name_end = current_slice.find(">")
+                 if name_end != -1:
+                     value_start_idx = name_end + 1
+                     rest_of_slice = current_slice[value_start_idx:]
+
+                     # A parameter can end in multiple ways:
+                     # 1. [Normal] Encounter </parameter>
+                     # 2. [Abnormal] Encounter the next <parameter=
+                     # 3. [Abnormal] Encounter </function>
+                     # So we take the smallest offset as the parameter end position.
+                     cand_end_param = rest_of_slice.find(self.parameter_end_token)
+                     cand_next_param = rest_of_slice.find(self.parameter_prefix)
+                     cand_end_func = rest_of_slice.find(self.function_end_token)
+
+                     candidates = []
+                     if cand_end_param != -1:
+                         candidates.append(
+                             (cand_end_param, len(self.parameter_end_token))
+                         )
+                     if cand_next_param != -1:
+                         candidates.append((cand_next_param, 0))
+                     if cand_end_func != -1:
+                         candidates.append((cand_end_func, 0))
+
+                     if candidates:
+                         best_cand = min(candidates, key=lambda x: x[0])
+                         end_pos = best_cand[0]
+                         end_token_len = best_cand[1]
+
+                         param_name = current_slice[
+                             len(self.parameter_prefix) : name_end
+                         ]
+                         raw_value = rest_of_slice[:end_pos]
+
+                         # Cleanup value
+                         if raw_value.startswith("\n"):
+                             raw_value = raw_value[1:]
+                         if raw_value.endswith("\n"):
+                             raw_value = raw_value[:-1]
+
+                         # JSON construction
+                         if not self.json_started:
+                             calls.append(
+                                 ToolCallItem(
+                                     tool_index=self.current_tool_id, parameters="{"
+                                 )
+                             )
+                             self.json_started = True
+
+                         param_config = self._get_arguments_config(
+                             self.current_func_name, tools
+                         )
+                         converted_val = self._convert_param_value(
+                             raw_value, param_name, param_config, self.current_func_name
+                         )
+
+                         # Construct JSON fragment: "key": value
+                         # Note: we must be careful with json.dumps to ensure valid JSON streaming
+                         json_key_val = f"{json.dumps(param_name)}: {json.dumps(converted_val, ensure_ascii=False)}"
+
+                         if self.current_tool_param_count > 0:
+                             fragment = f", {json_key_val}"
+                         else:
+                             fragment = json_key_val
+
+                         calls.append(
+                             ToolCallItem(
+                                 tool_index=self.current_tool_id, parameters=fragment
+                             )
+                         )
+                         self.current_tool_param_count += 1
+
+                         # Advance cursor
+                         total_len = (name_end + 1) + end_pos + end_token_len
+                         self.parsed_pos += total_len
+                         continue
+
+                 # Incomplete parameter tag or value
+                 break
+
+             # -------------------------------------------------------
+             # 4. Function end: </function>
+             # -------------------------------------------------------
+             if current_slice.startswith(self.function_end_token):
+                 if not self.json_started:
+                     calls.append(
+                         ToolCallItem(tool_index=self.current_tool_id, parameters="{")
+                     )
+                     self.json_started = True
+
+                 calls.append(
+                     ToolCallItem(tool_index=self.current_tool_id, parameters="}")
+                 )
+                 self.parsed_pos += len(self.function_end_token)
+                 self.current_func_name = None
+                 continue
+
+             # -------------------------------------------------------
+             # 5. Tool call end: </tool_call>
+             # -------------------------------------------------------
+             if current_slice.startswith(self.tool_call_end_token):
+                 self.parsed_pos += len(self.tool_call_end_token)
+                 self.is_inside_tool_call = False  # [FIX] Exit the tool call region
+                 continue
+
+             # -------------------------------------------------------
+             # 6. Handling content / whitespace / normal text
+             # -------------------------------------------------------
+             # If the current position is not the start of a tag (i.e. does not start with <),
+             # it might be plain text, or a newline between two tags.
+             # But we must be careful not to emit truncated tags like "<fun" as text.
+
+             next_open_angle = current_slice.find("<")
+
+             if next_open_angle == -1:
+                 # This entire segment is plain text
+                 if not self.is_inside_tool_call:
+                     normal_text_chunks.append(current_slice)
+                 # [FIX] If inside a tool call, discard this text (usually \n), don't append
+                 self.parsed_pos += len(current_slice)
+                 continue
+
+             elif next_open_angle == 0:
+                 # Looks like a tag, but doesn't match any known tag above
+
+                 possible_tags = [
+                     self.tool_call_start_token,
+                     self.tool_call_end_token,
+                     self.tool_call_prefix,
+                     self.function_end_token,
+                     self.parameter_prefix,
+                     self.parameter_end_token,
+                 ]
+
+                 is_potential_tag = False
+                 for tag in possible_tags:
+                     if tag.startswith(current_slice):
+                         is_potential_tag = True
+                         break
+
+                 if is_potential_tag:
+                     break  # Wait for more
+                 else:
+                     # Just a plain '<' symbol
+                     if not self.is_inside_tool_call:
+                         normal_text_chunks.append("<")
+                     self.parsed_pos += 1
+                     continue
+
+             else:
+                 # '<' is in the middle
+                 text_segment = current_slice[:next_open_angle]
+                 if not self.is_inside_tool_call:
+                     normal_text_chunks.append(text_segment)
+                 # [FIX] If inside a tool call, discard whitespace/text before the tag
+                 self.parsed_pos += next_open_angle
+                 continue
+
+         # Memory cleanup: slice the buffer.
+         # Keep the unparsed part, discard the parsed part.
+         if self.parsed_pos > 0:
+             self._buffer = self._buffer[self.parsed_pos :]
+             self.parsed_pos = 0
+
+         normal_text = "".join(normal_text_chunks) if normal_text_chunks else ""
+         return StreamingParseResult(calls=calls, normal_text=normal_text)
+
+     def supports_structural_tag(self) -> bool:
+         return False
+
+     def structure_info(self) -> _GetInfoFunc:
+         raise NotImplementedError
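The detector above consumes the XML-style tool-call format produced by the chat template. A self-contained illustration of that wire format using only the standard library (independent of sglang; the function and parameter names are invented):

```python
import json
import re

# Example model output in the format the detector parses
sample = (
    "<tool_call>\n"
    "<function=get_weather>\n"
    "<parameter=city>\nParis\n</parameter>\n"
    "<parameter=unit>\ncelsius\n</parameter>\n"
    "</function>\n"
    "</tool_call>"
)

name = re.search(r"<function=([^>]+)>", sample).group(1)
args = {
    key: value
    for key, value in re.findall(
        r"<parameter=([^>]+)>\n?(.*?)\n?</parameter>", sample, re.DOTALL
    )
}
print(name, json.dumps(args))  # get_weather {"city": "Paris", "unit": "celsius"}
```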
qwen3coder_tool_parser_vllm.py ADDED
@@ -0,0 +1,690 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import ast
4
+ import json
5
+ import uuid
6
+ from collections.abc import Sequence
7
+ from typing import Any, List, Optional, Union
8
+
9
+ import regex as re
10
+
11
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
12
+ ChatCompletionToolsParam,
13
+ DeltaFunctionCall, DeltaMessage,
14
+ DeltaToolCall,
15
+ ExtractedToolCallInformation,
16
+ FunctionCall, ToolCall)
17
+ from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
18
+ ToolParser, ToolParserManager)
19
+ from vllm.logger import init_logger
20
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
21
+
22
+ logger = init_logger(__name__)
23
+
24
+
25
+ @ToolParserManager.register_module("qwen3_coder")
26
+ class Qwen3CoderToolParser(ToolParser):
27
+
28
+ def __init__(self, tokenizer: AnyTokenizer):
29
+ super().__init__(tokenizer)
30
+
31
+ self.current_tool_name_sent: bool = False
32
+ self.prev_tool_call_arr: list[dict] = []
33
+ self.current_tool_id: int = -1
34
+ self.streamed_args_for_tool: list[str] = []
35
+
36
+ # Sentinel tokens for streaming mode
37
+ self.tool_call_start_token: str = "<tool_call>"
38
+ self.tool_call_end_token: str = "</tool_call>"
39
+ self.tool_call_prefix: str = "<function="
40
+ self.function_end_token: str = "</function>"
41
+ self.parameter_prefix: str = "<parameter="
42
+ self.parameter_end_token: str = "</parameter>"
43
+ self.is_tool_call_started: bool = False
44
+ self.failed_count: int = 0
45
+
46
+ # Enhanced streaming state - reset for each new message
47
+ self._reset_streaming_state()
48
+
49
+ # Regex patterns
50
+ self.tool_call_complete_regex = re.compile(
51
+ r"<tool_call>(.*?)</tool_call>", re.DOTALL)
52
+ self.tool_call_regex = re.compile(
53
+ r"<tool_call>(.*?)</tool_call>|<tool_call>(.*?)$", re.DOTALL)
54
+ self.tool_call_function_regex = re.compile(
55
+ r"<function=(.*?)</function>|<function=(.*)$", re.DOTALL)
56
+ self.tool_call_parameter_regex = re.compile(
57
+ r"<parameter=(.*?)(?:</parameter>|(?=<parameter=)|(?=</function>)|$)",
58
+ re.DOTALL)
59
+
60
+ if not self.model_tokenizer:
61
+ raise ValueError(
62
+ "The model tokenizer must be passed to the ToolParser "
63
+ "constructor during construction.")
64
+
65
+ self.tool_call_start_token_id = self.vocab.get(
66
+ self.tool_call_start_token)
67
+ self.tool_call_end_token_id = self.vocab.get(self.tool_call_end_token)
68
+
69
+ if self.tool_call_start_token_id is None or self.tool_call_end_token_id is None:
70
+ raise RuntimeError(
71
+ "Qwen3 XML Tool parser could not locate tool call start/end "
72
+ "tokens in the tokenizer!")
73
+
74
+ logger.info(
75
+ f"vLLM Successfully import tool parser {self.__class__.__name__} !"
76
+ )
77
+
78
+ def _generate_tool_call_id(self) -> str:
79
+ """Generate a unique tool call ID."""
80
+ return f"call_{uuid.uuid4().hex[:24]}"
81
+
82
+ def _reset_streaming_state(self):
83
+ """Reset all streaming state."""
84
+ self.current_tool_index = 0
85
+ self.is_tool_call_started = False
86
+ self.header_sent = False
87
+ self.current_tool_id = None
88
+ self.current_function_name = None
89
+ self.current_param_name = None
90
+ self.current_param_value = ""
91
+ self.param_count = 0
92
+ self.in_param = False
93
+ self.in_function = False
94
+ self.accumulated_text = ""
95
+ self.json_started = False
96
+ self.json_closed = False
97
+ # Store accumulated parameters for type conversion
98
+ self.accumulated_params = {}
99
+ self.streaming_request = None
100
+
101
+ def _get_arguments_config(
102
+ self, func_name: str,
103
+ tools: Optional[list[ChatCompletionToolsParam]]) -> dict:
104
+ """Extract argument configuration for a function."""
105
+ if tools is None:
106
+ return {}
107
+ for config in tools:
108
+ if not hasattr(config, "type") or not (hasattr(
109
+ config, "function") and hasattr(config.function, "name")):
110
+ continue
111
+ if config.type == "function" and config.function.name == func_name:
112
+ if not hasattr(config.function, "parameters"):
113
+ return {}
114
+ params = config.function.parameters
115
+ if isinstance(params, dict) and "properties" in params:
116
+ return params["properties"]
117
+ elif isinstance(params, dict):
118
+ return params
119
+ else:
120
+ return {}
121
+ logger.warning(f"Tool '{func_name}' is not defined in the tools list.")
122
+ return {}
123
+
124
+ def _convert_param_value(self, param_value: str, param_name: str,
125
+ param_config: dict, func_name: str) -> Any:
126
+ """Convert parameter value based on its type in the schema."""
127
+ # Handle null value for any type
128
+ if param_value.lower() == "null":
129
+ return None
130
+
131
+ if param_name not in param_config:
132
+ if param_config != {}:
133
+ logger.warning(
134
+ f"Parsed parameter '{param_name}' is not defined in the tool "
135
+ f"parameters for tool '{func_name}', directly returning the string value."
136
+ )
137
+ return param_value
138
+
139
+ if isinstance(param_config[param_name],
140
+ dict) and "type" in param_config[param_name]:
141
+ param_type = str(param_config[param_name]["type"]).strip().lower()
142
+ else:
143
+ param_type = "string"
144
+ if param_type in ["string", "str", "text", "varchar", "char", "enum"]:
145
+ return param_value
146
+ elif param_type.startswith("int") or param_type.startswith(
147
+ "uint") or param_type.startswith(
148
+ "long") or param_type.startswith(
149
+ "short") or param_type.startswith("unsigned"):
150
+ try:
151
+ param_value = int(param_value)
152
+ except:
153
+ logger.warning(
154
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not an integer in tool "
155
+ f"'{func_name}', degenerating to string.")
156
+ return param_value
157
+ elif param_type.startswith("num") or param_type.startswith("float"):
158
+ try:
159
+ maybe_convert = "." not in param_value and "e" not in param_value.lower()
160
+ param_value: float = float(param_value)
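+ # Values written without a '.' or exponent that parse as integral floats
+ # are returned as ints (e.g. "5" -> 5, while "5.0" stays a float).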
161
+ if maybe_convert and param_value.is_integer():
162
+ param_value = int(param_value)
163
+ except Exception:
164
+ logger.warning(
165
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not a float in tool "
166
+ f"'{func_name}', degenerating to string.")
167
+ return param_value
168
+ elif param_type in ["boolean", "bool", "binary"]:
169
+ param_value = param_value.lower()
170
+ if param_value not in ["true", "false"]:
171
+ logger.warning(
172
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not a boolean (`true` of `false`) in tool '{func_name}', degenerating to false."
173
+ )
174
+ return param_value == "true"
175
+ else:
176
+ if param_type in ["object", "array", "arr"
177
+ ] or param_type.startswith(
178
+ "dict") or param_type.startswith("list"):
179
+ try:
180
+ param_value = json.loads(param_value)
181
+ return param_value
182
+ except Exception:
183
+ logger.warning(
184
+ f"Parsed value '{param_value}' of parameter '{param_name}' cannot be parsed with json.loads in tool "
185
+ f"'{func_name}', will try other methods to parse it.")
186
+ try:
187
+ param_value = ast.literal_eval(param_value)  # safer than eval for Python literals
188
+ except Exception:
189
+ logger.warning(
190
+ f"Parsed value '{param_value}' of parameter '{param_name}' cannot be converted via Python `ast.literal_eval()` in tool '{func_name}', degenerating to string."
191
+ )
192
+ return param_value
193
+
194
+ def _parse_xml_function_call(
195
+ self, function_call_str: str,
196
+ tools: Optional[list[ChatCompletionToolsParam]]
197
+ ) -> Optional[ToolCall]:
198
+
199
+ # Extract function name
200
+ end_index = function_call_str.index(">")
201
+ function_name = function_call_str[:end_index]
202
+ param_config = self._get_arguments_config(function_name, tools)
203
+ parameters = function_call_str[end_index + 1:]
204
+ param_dict = {}
205
+ for match_text in self.tool_call_parameter_regex.findall(parameters):
206
+ idx = match_text.index(">")
207
+ param_name = match_text[:idx]
208
+ param_value = str(match_text[idx + 1:])
209
+ # Strip a single leading and trailing newline around the value
210
+ if param_value.startswith("\n"):
211
+ param_value = param_value[1:]
212
+ if param_value.endswith("\n"):
213
+ param_value = param_value[:-1]
214
+
215
+ param_dict[param_name] = self._convert_param_value(
216
+ param_value, param_name, param_config, function_name)
217
+ return ToolCall(
218
+ type="function",
219
+ function=FunctionCall(name=function_name,
220
+ arguments=json.dumps(param_dict,
221
+ ensure_ascii=False)),
222
+ )
223
+
224
+ def _get_function_calls(self, model_output: str) -> List[str]:
225
+ # Find all tool calls
226
+ matched_ranges = self.tool_call_regex.findall(model_output)
227
+ raw_tool_calls = [
228
+ match[0] if match[0] else match[1] for match in matched_ranges
229
+ ]
230
+
231
+ # Fall back to treating the whole output as one candidate call if no <tool_call> tags are found
232
+ if len(raw_tool_calls) == 0:
233
+ raw_tool_calls = [model_output]
234
+
235
+ raw_function_calls = []
236
+ for tool_call in raw_tool_calls:
237
+ raw_function_calls.extend(
238
+ self.tool_call_function_regex.findall(tool_call))
239
+
240
+ function_calls = [
241
+ match[0] if match[0] else match[1] for match in raw_function_calls
242
+ ]
243
+ return function_calls
244
+
245
+ def extract_tool_calls(
246
+ self,
247
+ model_output: str,
248
+ request: ChatCompletionRequest,
249
+ ) -> ExtractedToolCallInformation:
250
+ # Quick check to avoid unnecessary processing
251
+ if self.tool_call_prefix not in model_output:
252
+ return ExtractedToolCallInformation(tools_called=False,
253
+ tool_calls=[],
254
+ content=model_output)
255
+
256
+ try:
257
+ function_calls = self._get_function_calls(model_output)
258
+ if len(function_calls) == 0:
259
+ return ExtractedToolCallInformation(tools_called=False,
260
+ tool_calls=[],
261
+ content=model_output)
262
+
263
+ tool_calls = [
264
+ self._parse_xml_function_call(function_call_str, request.tools)
265
+ for function_call_str in function_calls
266
+ ]
267
+
268
+ # Populate prev_tool_call_arr for serving layer to set finish_reason
269
+ self.prev_tool_call_arr.clear() # Clear previous calls
270
+ for tool_call in tool_calls:
271
+ if tool_call:
272
+ self.prev_tool_call_arr.append({
273
+ "name":
274
+ tool_call.function.name,
275
+ "arguments":
276
+ tool_call.function.arguments,
277
+ })
278
+
279
+ # Extract content before tool calls
280
+ content_index = model_output.find(self.tool_call_start_token)
281
+ content_index = content_index if content_index >= 0 else model_output.find(
282
+ self.tool_call_prefix)
283
+ content = model_output[:content_index] # .rstrip()
284
+
285
+ return ExtractedToolCallInformation(
286
+ tools_called=(len(tool_calls) > 0),
287
+ tool_calls=tool_calls,
288
+ content=content if content else None,
289
+ )
290
+
291
+ except Exception:
292
+ logger.exception("Error in extracting tool call from response.")
293
+ return ExtractedToolCallInformation(tools_called=False,
294
+ tool_calls=[],
295
+ content=model_output)
296
+
297
+ def extract_tool_calls_streaming(
298
+ self,
299
+ previous_text: str,
300
+ current_text: str,
301
+ delta_text: str,
302
+ previous_token_ids: Sequence[int],
303
+ current_token_ids: Sequence[int],
304
+ delta_token_ids: Sequence[int],
305
+ request: ChatCompletionRequest,
306
+ ) -> Union[DeltaMessage, None]:
307
+ # Store request for type conversion
308
+ if not previous_text:
309
+ self._reset_streaming_state()
310
+ self.streaming_request = request
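+ # An empty previous_text marks the first chunk of a new response, so the
+ # per-tool streaming state is reset and the request is kept for later
+ # parameter type conversion.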
311
+
312
+ # If no delta text, return None unless it's an EOS token after tool calls
313
+ if not delta_text:
314
+ # Check if this is an EOS token after all tool calls are complete
315
+ # We check for tool calls in the text even if is_tool_call_started is False
316
+ # because it might have been reset after processing all tools
317
+ if delta_token_ids and self.tool_call_end_token_id not in delta_token_ids:
318
+ # Count complete tool calls
319
+ complete_calls = len(
320
+ self.tool_call_complete_regex.findall(current_text))
321
+
322
+ # If we have completed tool calls and populated prev_tool_call_arr
323
+ if complete_calls > 0 and len(self.prev_tool_call_arr) > 0:
324
+ # Check if all tool calls are closed
325
+ open_calls = current_text.count(
326
+ self.tool_call_start_token) - current_text.count(
327
+ self.tool_call_end_token)
328
+ if open_calls == 0:
329
+ # Return empty delta message to allow finish_reason processing
330
+ return DeltaMessage(content="")
331
+ elif not self.is_tool_call_started and current_text:
332
+ # This is a regular content response that's now complete
333
+ return DeltaMessage(content="")
334
+ return None
335
+
336
+ # Update accumulated text
337
+ self.accumulated_text = current_text
338
+
339
+ # Check if we need to advance to next tool
340
+ if self.json_closed and not self.in_function:
341
+ # Check if this tool call has ended
342
+ tool_ends = current_text.count(self.tool_call_end_token)
343
+ if tool_ends > self.current_tool_index:
344
+ # This tool has ended, advance to next
345
+ self.current_tool_index += 1
346
+ self.header_sent = False
347
+ self.param_count = 0
348
+ self.json_started = False
349
+ self.json_closed = False
350
+ self.accumulated_params = {}
351
+
352
+ # Check if there are more tool calls
353
+ tool_starts = current_text.count(self.tool_call_start_token)
354
+ if self.current_tool_index >= tool_starts:
355
+ # No more tool calls
356
+ self.is_tool_call_started = False
357
+ # Continue processing next tool
358
+ return None
359
+
360
+ # Handle normal content before tool calls
361
+ if not self.is_tool_call_started:
362
+ # Check if tool call is starting
363
+ if self.tool_call_start_token_id in delta_token_ids or self.tool_call_start_token in delta_text:
364
+ self.is_tool_call_started = True
365
+ # Return any content before the tool call
366
+ if self.tool_call_start_token in delta_text:
367
+ content_before = delta_text[:delta_text.index(
368
+ self.tool_call_start_token)]
369
+ if content_before:
370
+ return DeltaMessage(content=content_before)
371
+ return None
372
+ else:
373
+ # Check if we're between tool calls - skip whitespace
374
+ if current_text.rstrip().endswith(self.tool_call_end_token):
375
+ # We just ended a tool call, skip whitespace
376
+ if delta_text.strip() == "":
377
+ return None
378
+ # Normal content, no tool call
379
+ return DeltaMessage(content=delta_text)
380
+
381
+ # Check if we're between tool calls (waiting for next one)
382
+ # Count tool calls we've seen vs processed
383
+ tool_starts_count = current_text.count(self.tool_call_start_token)
384
+ if self.current_tool_index >= tool_starts_count:
385
+ # We're past all tool calls, shouldn't be here
386
+ return None
387
+
388
+ # We're in a tool call, find the current tool call portion
389
+ # Need to find the correct tool call based on current_tool_index
390
+ tool_starts = []
391
+ idx = 0
392
+ while True:
393
+ idx = current_text.find(self.tool_call_start_token, idx)
394
+ if idx == -1:
395
+ break
396
+ tool_starts.append(idx)
397
+ idx += len(self.tool_call_start_token)
398
+
399
+ if self.current_tool_index >= len(tool_starts):
400
+ # No more tool calls to process yet
401
+ return None
402
+
403
+ tool_start_idx = tool_starts[self.current_tool_index]
404
+ # Find where this tool call ends (or current position if not ended yet)
405
+ tool_end_idx = current_text.find(self.tool_call_end_token,
406
+ tool_start_idx)
407
+ if tool_end_idx == -1:
408
+ tool_text = current_text[tool_start_idx:]
409
+ else:
410
+ tool_text = current_text[tool_start_idx:tool_end_idx +
411
+ len(self.tool_call_end_token)]
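+ # tool_text now covers only the tool call currently being streamed, whether
+ # or not its closing tag has arrived yet.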
412
+
413
+ # Looking for function header
414
+ if not self.header_sent:
415
+ if self.tool_call_prefix in tool_text:
416
+ func_start = tool_text.find(self.tool_call_prefix) + len(
417
+ self.tool_call_prefix)
418
+ func_end = tool_text.find(">", func_start)
419
+
420
+ if func_end != -1:
421
+ # Found complete function name
422
+ self.current_function_name = tool_text[func_start:func_end]
423
+ self.current_tool_id = self._generate_tool_call_id()
424
+ self.header_sent = True
425
+ self.in_function = True
426
+
427
+ # IMPORTANT: Add to prev_tool_call_arr immediately when we detect a tool call
428
+ # This ensures finish_reason="tool_calls" even if parsing isn't complete
429
+ already_added = any(
430
+ tool.get("name") == self.current_function_name
431
+ for tool in self.prev_tool_call_arr)
432
+ if not already_added:
433
+ self.prev_tool_call_arr.append({
434
+ "name": self.current_function_name,
435
+ "arguments":
436
+ "{}", # Placeholder, will be updated later
437
+ })
438
+
439
+ # Send header with function info
440
+ return DeltaMessage(tool_calls=[
441
+ DeltaToolCall(
442
+ index=self.current_tool_index,
443
+ id=self.current_tool_id,
444
+ function=DeltaFunctionCall(
445
+ name=self.current_function_name, arguments=""),
446
+ type="function",
447
+ )
448
+ ])
449
+ return None
450
+
451
+ # We've sent header, now handle function body
452
+ if self.in_function:
453
+ # Send opening brace if not sent yet
454
+ if not self.json_started and self.parameter_prefix not in delta_text:
455
+ self.json_started = True
456
+ return DeltaMessage(tool_calls=[
457
+ DeltaToolCall(
458
+ index=self.current_tool_index,
459
+ function=DeltaFunctionCall(arguments="{"),
460
+ )
461
+ ])
462
+
463
+ # Make sure json_started is set if we're processing parameters
464
+ if not self.json_started:
465
+ self.json_started = True
466
+
467
+ # Check for function end in accumulated text
468
+ if not self.json_closed and self.function_end_token in tool_text:
469
+ # Close JSON
470
+ self.json_closed = True
471
+
472
+ # Extract the complete tool call to update prev_tool_call_arr with final arguments
473
+ # Find the function content
474
+ func_start = tool_text.find(self.tool_call_prefix) + len(
475
+ self.tool_call_prefix)
476
+ func_content_end = tool_text.find(self.function_end_token,
477
+ func_start)
478
+ if func_content_end != -1:
479
+ func_content = tool_text[func_start:func_content_end]
480
+ # Parse to get the complete arguments
481
+ try:
482
+ parsed_tool = self._parse_xml_function_call(
483
+ func_content, self.streaming_request.tools
484
+ if self.streaming_request else None)
485
+ if parsed_tool:
486
+ # Update existing entry in prev_tool_call_arr with complete arguments
487
+ for i, tool in enumerate(self.prev_tool_call_arr):
488
+ if tool.get(
489
+ "name") == parsed_tool.function.name:
490
+ self.prev_tool_call_arr[i][
491
+ "arguments"] = parsed_tool.function.arguments
492
+ break
493
+ except Exception:
494
+ pass # Ignore parsing errors during streaming
495
+
496
+ result = DeltaMessage(tool_calls=[
497
+ DeltaToolCall(
498
+ index=self.current_tool_index,
499
+ function=DeltaFunctionCall(arguments="}"),
500
+ )
501
+ ])
502
+
503
+ # Reset state for next tool
504
+ self.in_function = False
505
+ self.json_closed = True
506
+ self.accumulated_params = {}
507
+
508
+ return result
509
+
510
+ # Look for parameters
511
+ # Find all parameter starts
512
+ param_starts = []
513
+ idx = 0
514
+ while True:
515
+ idx = tool_text.find(self.parameter_prefix, idx)
516
+ if idx == -1:
517
+ break
518
+ param_starts.append(idx)
519
+ idx += len(self.parameter_prefix)
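+ # param_starts holds the offset of every parameter opener seen so far in
+ # tool_text; param_count tracks how many have already been emitted.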
520
+
521
+ # Check if we should start a new parameter
522
+ if not self.in_param and self.param_count < len(param_starts):
523
+
524
+ if len(param_starts) > self.param_count:
525
+ # Process the next parameter
526
+ param_idx = param_starts[self.param_count]
527
+ param_start = param_idx + len(self.parameter_prefix)
528
+ remaining = tool_text[param_start:]
529
+
530
+ if ">" in remaining:
531
+ # We have the complete parameter name
532
+ name_end = remaining.find(">")
533
+ self.current_param_name = remaining[:name_end]
534
+
535
+ # Find the parameter value
536
+ value_start = param_start + name_end + 1
537
+ value_text = tool_text[value_start:]
538
+ if value_text.startswith("\n"):
539
+ value_text = value_text[1:]
540
+
541
+ # Find where this parameter ends
542
+ param_end_idx = value_text.find(
543
+ self.parameter_end_token)
544
+ if param_end_idx == -1:
545
+ # No closing tag, look for next parameter or function end
546
+ next_param_idx = value_text.find(
547
+ self.parameter_prefix)
548
+ func_end_idx = value_text.find(
549
+ self.function_end_token)
550
+
551
+ if next_param_idx != -1 and (func_end_idx == -1
552
+ or next_param_idx
553
+ < func_end_idx):
554
+ param_end_idx = next_param_idx
555
+ elif func_end_idx != -1:
556
+ param_end_idx = func_end_idx
557
+ else:
558
+ # Neither found, check if tool call is complete
559
+ if self.tool_call_end_token in tool_text:
560
+ # Tool call is complete, so parameter must be complete too
561
+ # Use all remaining text before function end as value
562
+ param_end_idx = len(value_text)
563
+ else:
564
+ # Still streaming, wait for more content
565
+ return None
566
+
567
+ if param_end_idx != -1:
568
+ # Complete parameter found
569
+ param_value = value_text[:param_end_idx]
570
+ if param_value.endswith("\n"):
571
+ param_value = param_value[:-1]
572
+
573
+ # Store raw value for later processing
574
+ self.accumulated_params[
575
+ self.current_param_name] = param_value
576
+
577
+ # Get parameter configuration for type conversion
578
+ param_config = self._get_arguments_config(
579
+ self.current_function_name,
580
+ self.streaming_request.tools
581
+ if self.streaming_request else None)
582
+
583
+ # Convert the parameter value to the appropriate type
584
+ converted_value = self._convert_param_value(
585
+ param_value, self.current_param_name,
586
+ param_config, self.current_function_name)
587
+
588
+ # Build JSON fragment based on the converted type
589
+ # Use json.dumps to properly serialize the value
590
+ serialized_value = json.dumps(converted_value,
591
+ ensure_ascii=False)
592
+
593
+ if self.param_count == 0:
594
+ json_fragment = f'"{self.current_param_name}": {serialized_value}'
595
+ else:
596
+ json_fragment = f', "{self.current_param_name}": {serialized_value}'
597
+
598
+ self.param_count += 1
599
+
600
+ return DeltaMessage(tool_calls=[
601
+ DeltaToolCall(
602
+ index=self.current_tool_index,
603
+ function=DeltaFunctionCall(
604
+ arguments=json_fragment),
605
+ )
606
+ ])
607
+
608
+ # Continue parameter value - Not used in the current implementation
609
+ # since we process complete parameters above
610
+ if self.in_param:
611
+ if self.parameter_end_token in delta_text:
612
+ # End of parameter
613
+ end_idx = delta_text.find(self.parameter_end_token)
614
+ value_chunk = delta_text[:end_idx]
615
+
616
+ # Skip past > if at start
617
+ if not self.current_param_value and ">" in value_chunk:
618
+ gt_idx = value_chunk.find(">")
619
+ value_chunk = value_chunk[gt_idx + 1:]
620
+
621
+ if not self.current_param_value and value_chunk.startswith(
622
+ "\n"):
623
+ value_chunk = value_chunk[1:]
624
+
625
+ # Store complete value
626
+ full_value = self.current_param_value + value_chunk
627
+ self.accumulated_params[
628
+ self.current_param_name] = full_value
629
+
630
+ # Get parameter configuration for type conversion
631
+ param_config = self._get_arguments_config(
632
+ self.current_function_name,
633
+ self.streaming_request.tools
634
+ if self.streaming_request else None)
635
+
636
+ # Convert the parameter value to the appropriate type
637
+ converted_value = self._convert_param_value(
638
+ full_value, self.current_param_name, param_config,
639
+ self.current_function_name)
640
+
641
+ # Serialize the converted value
642
+ serialized_value = json.dumps(converted_value,
643
+ ensure_ascii=False)
644
+
645
+ # Since we've been streaming the quoted version, we need to close it properly
646
+ # This is complex - for now just complete the value
647
+ self.in_param = False
648
+ self.current_param_value = ""
649
+
650
+ # Just close the current parameter string
651
+ return DeltaMessage(tool_calls=[
652
+ DeltaToolCall(
653
+ index=self.current_tool_index,
654
+ function=DeltaFunctionCall(
655
+ arguments='"'), # Close the string quote
656
+ )
657
+ ])
658
+ else:
659
+ # Continue accumulating value
660
+ value_chunk = delta_text
661
+
662
+ # Handle first chunk after param name
663
+ if not self.current_param_value and ">" in value_chunk:
664
+ gt_idx = value_chunk.find(">")
665
+ value_chunk = value_chunk[gt_idx + 1:]
666
+
667
+ if not self.current_param_value and value_chunk.startswith(
668
+ "\n"):
669
+ value_chunk = value_chunk[1:]
670
+
671
+ if value_chunk:
672
+ # Stream the escaped delta
673
+ prev_escaped = json.dumps(
674
+ self.current_param_value, ensure_ascii=False
675
+ )[1:-1] if self.current_param_value else ""
676
+ self.current_param_value += value_chunk
677
+ full_escaped = json.dumps(self.current_param_value,
678
+ ensure_ascii=False)[1:-1]
679
+ delta_escaped = full_escaped[len(prev_escaped):]
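+ # Escape the old and new accumulated values and emit only the newly added
+ # suffix, so JSON string escaping stays consistent across chunk boundaries.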
680
+
681
+ if delta_escaped:
682
+ return DeltaMessage(tool_calls=[
683
+ DeltaToolCall(
684
+ index=self.current_tool_index,
685
+ function=DeltaFunctionCall(
686
+ arguments=delta_escaped),
687
+ )
688
+ ])
689
+
690
+ return None
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be75606093db2094d7cd20f3c2f385c212750648bd6ea4fb2bf507a6a4c55506
3
+ size 11422650
tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|im_end|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<|im_start|>",
10
+ "<|im_end|>",
11
+ "<|object_ref_start|>",
12
+ "<|object_ref_end|>",
13
+ "<|box_start|>",
14
+ "<|box_end|>",
15
+ "<|quad_start|>",
16
+ "<|quad_end|>",
17
+ "<|vision_start|>",
18
+ "<|vision_end|>",
19
+ "<|vision_pad|>",
20
+ "<|image_pad|>",
21
+ "<|video_pad|>"
22
+ ],
23
+ "is_local": true,
24
+ "model_max_length": 1048576,
25
+ "pad_token": "<|endoftext|>",
26
+ "split_special_tokens": false,
27
+ "tokenizer_class": "Qwen2Tokenizer",
28
+ "tool_parser_type": "json_tools",
29
+ "unk_token": null
30
+ }