knowrohit07 commited on
Commit
01bb932
·
verified ·
1 Parent(s): e2a1711

Upload model files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ library_name: mlx
4
+ tags:
5
+ - bodega
6
+ - inference
7
+ - optimized
8
+ ---
9
+
10
+ # axe-turbo-31b
11
+
12
+ **Optimized for Bodega Inference Engine**
13
+
14
+ This edition ensures peak performance and compatibility with the Bodega Inference Engine infrastructure.
15
+
16
+ ## Model Information
17
+
18
+ - **Trained By:** SRSWTI
19
+ - **Optimized For:** Bodega Inference Engine
20
+ - **Use Cases:** Fast text generation, chat, reasoning, code generation
21
+
22
+ ## Optimizations
23
+
24
+ - Verified compatibility with Bodega Inference Engine
25
+ - Benchmarked for low-latency inference
26
+ - Production-ready deployment
27
+
28
+ ## Disclaimer
29
+
30
+ SRSWTI is not the creator or owner of the underlying foundation model architecture. The foundation model is created and provided by third parties. SRSWTI has trained this model on top of the foundation model but does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any outputs. You understand that this model can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. SRSWTI may not monitor or control all model outputs and cannot, and does not, take responsibility for any such outputs. SRSWTI disclaims all warranties or guarantees about the accuracy, reliability or benefits of this model. SRSWTI further disclaims any warranty that the model will meet your requirements, be secure, uninterrupted or available at any time or location, or be error-free or virus-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to this model, your downloading of this model, or use of this model provided by or through SRSWTI.
31
+
32
+ ---
33
+
34
+ *Developed by [SRSWTI Inc.](https://www.srswti.com) - Building the world's fastest retrieval and inference engines.*
added_tokens.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
chat_template.jinja ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% macro render_item_list(item_list, tag_name='required') %}
2
+ {%- if item_list is defined and item_list is iterable and item_list | length > 0 %}
3
+ {%- if tag_name %}{{- '\n<' ~ tag_name ~ '>' -}}{% endif %}
4
+ {{- '[' }}
5
+ {%- for item in item_list -%}
6
+ {%- if loop.index > 1 %}{{- ", "}}{% endif -%}
7
+ {%- if item is string -%}
8
+ {{ "`" ~ item ~ "`" }}
9
+ {%- else -%}
10
+ {{ item }}
11
+ {%- endif -%}
12
+ {%- endfor -%}
13
+ {{- ']' }}
14
+ {%- if tag_name %}{{- '</' ~ tag_name ~ '>' -}}{% endif %}
15
+ {%- endif %}
16
+ {% endmacro %}
17
+
18
+ {%- if messages[0]["role"] == "system" %}
19
+ {%- set system_message = messages[0]["content"] %}
20
+ {%- set loop_messages = messages[1:] %}
21
+ {%- else %}
22
+ {%- set loop_messages = messages %}
23
+ {%- endif %}
24
+
25
+ {%- if not tools is defined %}
26
+ {%- set tools = [] %}
27
+ {%- endif %}
28
+
29
+ {%- if system_message is defined %}
30
+ {{- "<|im_start|>system\n" + system_message }}
31
+ {%- else %}
32
+ {%- if tools is iterable and tools | length > 0 %}
33
+ {{- "<|im_start|>system\nYou are Qwen, a helpful AI assistant that can interact with a computer to solve tasks." }}
34
+ {%- endif %}
35
+ {%- endif %}
36
+ {%- if tools is iterable and tools | length > 0 %}
37
+ {{- "\n\nYou have access to the following functions:\n\n" }}
38
+ {{- "<tools>" }}
39
+ {%- for tool in tools %}
40
+ {%- if tool.function is defined %}
41
+ {%- set tool = tool.function %}
42
+ {%- endif %}
43
+ {{- "\n<function>\n<name>" ~ tool.name ~ "</name>" }}
44
+ {{- '\n<description>' ~ (tool.description | trim) ~ '</description>' }}
45
+ {{- '\n<parameters>' }}
46
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
47
+ {{- '\n<parameter>' }}
48
+ {{- '\n<name>' ~ param_name ~ '</name>' }}
49
+ {%- if param_fields.type is defined %}
50
+ {{- '\n<type>' ~ (param_fields.type | string) ~ '</type>' }}
51
+ {%- endif %}
52
+ {%- if param_fields.description is defined %}
53
+ {{- '\n<description>' ~ (param_fields.description | trim) ~ '</description>' }}
54
+ {%- endif %}
55
+ {{- render_item_list(param_fields.enum, 'enum') }}
56
+ {%- set handled_keys = ['type', 'description', 'enum', 'required'] %}
57
+ {%- for json_key in param_fields.keys() | reject("in", handled_keys) %}
58
+ {%- set normed_json_key = json_key | replace("-", "_") | replace(" ", "_") | replace("$", "") %}
59
+ {%- if param_fields[json_key] is mapping %}
60
+ {{- '\n<' ~ normed_json_key ~ '>' ~ (param_fields[json_key] | tojson | safe) ~ '</' ~ normed_json_key ~ '>' }}
61
+ {%- else %}
62
+ {{-'\n<' ~ normed_json_key ~ '>' ~ (param_fields[json_key] | string) ~ '</' ~ normed_json_key ~ '>' }}
63
+ {%- endif %}
64
+ {%- endfor %}
65
+ {{- render_item_list(param_fields.required, 'required') }}
66
+ {{- '\n</parameter>' }}
67
+ {%- endfor %}
68
+ {{- render_item_list(tool.parameters.required, 'required') }}
69
+ {{- '\n</parameters>' }}
70
+ {%- if tool.return is defined %}
71
+ {%- if tool.return is mapping %}
72
+ {{- '\n<return>' ~ (tool.return | tojson | safe) ~ '</return>' }}
73
+ {%- else %}
74
+ {{- '\n<return>' ~ (tool.return | string) ~ '</return>' }}
75
+ {%- endif %}
76
+ {%- endif %}
77
+ {{- '\n</function>' }}
78
+ {%- endfor %}
79
+ {{- "\n</tools>" }}
80
+ {{- '\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>' }}
81
+ {%- endif %}
82
+ {%- if system_message is defined %}
83
+ {{- '<|im_end|>\n' }}
84
+ {%- else %}
85
+ {%- if tools is iterable and tools | length > 0 %}
86
+ {{- '<|im_end|>\n' }}
87
+ {%- endif %}
88
+ {%- endif %}
89
+ {%- for message in loop_messages %}
90
+ {%- if message.role == "assistant" and message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
91
+ {{- '<|im_start|>' + message.role }}
92
+ {%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
93
+ {{- '\n' + message.content | trim + '\n' }}
94
+ {%- endif %}
95
+ {%- for tool_call in message.tool_calls %}
96
+ {%- if tool_call.function is defined %}
97
+ {%- set tool_call = tool_call.function %}
98
+ {%- endif %}
99
+ {{- '\n<tool_call>\n<function=' + tool_call.name + '>\n' }}
100
+ {%- if tool_call.arguments is defined %}
101
+ {%- for args_name, args_value in tool_call.arguments|items %}
102
+ {{- '<parameter=' + args_name + '>\n' }}
103
+ {%- set args_value = args_value if args_value is string else args_value | string %}
104
+ {{- args_value }}
105
+ {{- '\n</parameter>\n' }}
106
+ {%- endfor %}
107
+ {%- endif %}
108
+ {{- '</function>\n</tool_call>' }}
109
+ {%- endfor %}
110
+ {{- '<|im_end|>\n' }}
111
+ {%- elif message.role == "user" or message.role == "system" or message.role == "assistant" %}
112
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
113
+ {%- elif message.role == "tool" %}
114
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
115
+ {{- '<|im_start|>user\n' }}
116
+ {%- endif %}
117
+ {{- '<tool_response>\n' }}
118
+ {{- message.content }}
119
+ {{- '\n</tool_response>\n' }}
120
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
121
+ {{- '<|im_end|>\n' }}
122
+ {%- elif loop.last %}
123
+ {{- '<|im_end|>\n' }}
124
+ {%- endif %}
125
+ {%- else %}
126
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' }}
127
+ {%- endif %}
128
+ {%- endfor %}
129
+ {%- if add_generation_prompt %}
130
+ {{- '<|im_start|>assistant\n' }}
131
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen3MoeForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "decoder_sparse_step": 1,
7
+ "eos_token_id": 151645,
8
+ "head_dim": 128,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 2048,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 5472,
13
+ "max_position_embeddings": 262144,
14
+ "max_window_layers": 28,
15
+ "mlp_only_layers": [],
16
+ "model_type": "qwen3_moe",
17
+ "moe_intermediate_size": 768,
18
+ "norm_topk_prob": true,
19
+ "num_attention_heads": 32,
20
+ "num_experts": 128,
21
+ "num_experts_per_tok": 8,
22
+ "num_hidden_layers": 48,
23
+ "num_key_value_heads": 4,
24
+ "output_router_logits": false,
25
+ "qkv_bias": false,
26
+ "quantization": {
27
+ "group_size": 64,
28
+ "bits": 8
29
+ },
30
+ "quantization_config": {
31
+ "group_size": 64,
32
+ "bits": 8
33
+ },
34
+ "rms_norm_eps": 1e-06,
35
+ "rope_scaling": null,
36
+ "rope_theta": 10000000,
37
+ "router_aux_loss_coef": 0.0,
38
+ "shared_expert_intermediate_size": 0,
39
+ "sliding_window": null,
40
+ "tie_word_embeddings": false,
41
+ "torch_dtype": "bfloat16",
42
+ "transformers_version": "4.52.3",
43
+ "use_cache": true,
44
+ "use_qk_norm": true,
45
+ "use_sliding_window": false,
46
+ "vocab_size": 151936
47
+ }
generation_config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "pad_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "repetition_penalty": 1.05,
9
+ "temperature": 0.7,
10
+ "top_p": 0.8,
11
+ "top_k": 20
12
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f7cbc1b29b47c24cad7c87ad5d78d8d10eb0dd42278ddb6422a6ce6dcaa98a8
3
+ size 5199377497
model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:013918a532a2673efc3e22676b5dc9a0025d28f1823df0fdd5dfd8a8c94b2657
3
+ size 5296584322
model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e227aaa501f644974e64b4db35adb042ef25474abadd552bf1d7a7b74132d478
3
+ size 5296584402
model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:549fd2970c9780fab83bf574ef348338f9b85ab2b6e9535cfb58df7a4d06ac84
3
+ size 5296584404
model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:302c97dc377be5b0543d1bbfc996b22c20d5ecd779ff972301189015e09096bc
3
+ size 5296584480
model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f672186d62aeace0909643fe9304745052b51aa471196e3a5a9bbb73756d3487
3
+ size 5296584426
model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:454991147bf856ca2e813ba6f4d19448fe2ec922e3e9bb95008d71932db2f105
3
+ size 758436972
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen3coder_tool_parser.py ADDED
@@ -0,0 +1,675 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import json
4
+ import re
5
+ import uuid
6
+ from collections.abc import Sequence
7
+ from typing import Union, Optional, Any, List, Dict
8
+ from enum import Enum
9
+
10
+ from vllm.entrypoints.openai.protocol import (
11
+ ChatCompletionRequest,
12
+ ChatCompletionToolsParam,
13
+ DeltaMessage,
14
+ DeltaToolCall,
15
+ DeltaFunctionCall,
16
+ ExtractedToolCallInformation,
17
+ FunctionCall,
18
+ ToolCall,
19
+ )
20
+ from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
21
+ ToolParser,
22
+ ToolParserManager,
23
+ )
24
+ from vllm.logger import init_logger
25
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
26
+
27
+ logger = init_logger(__name__)
28
+
29
+
30
+ @ToolParserManager.register_module("qwen3_xml")
31
+ class Qwen3XMLToolParser(ToolParser):
32
+ def __init__(self, tokenizer: AnyTokenizer):
33
+ super().__init__(tokenizer)
34
+
35
+ self.current_tool_name_sent: bool = False
36
+ self.prev_tool_call_arr: list[dict] = []
37
+ self.current_tool_id: int = -1
38
+ self.streamed_args_for_tool: list[str] = []
39
+
40
+ # Sentinel tokens for streaming mode
41
+ self.tool_call_start_token: str = "<tool_call>"
42
+ self.tool_call_end_token: str = "</tool_call>"
43
+ self.tool_call_prefix: str = "<function="
44
+ self.function_end_token: str = "</function>"
45
+ self.parameter_prefix: str = "<parameter="
46
+ self.parameter_end_token: str = "</parameter>"
47
+ self.is_tool_call_started: bool = False
48
+ self.failed_count: int = 0
49
+
50
+ # Enhanced streaming state - reset for each new message
51
+ self._reset_streaming_state()
52
+
53
+ # Regex patterns
54
+ self.tool_call_complete_regex = re.compile(
55
+ r"<tool_call>(.*?)</tool_call>", re.DOTALL
56
+ )
57
+ self.tool_call_regex = re.compile(
58
+ r"<tool_call>(.*?)</tool_call>|<tool_call>(.*?)$", re.DOTALL
59
+ )
60
+ self.tool_call_function_regex = re.compile(
61
+ r"<function=(.*?)</function>|<function=(.*)$", re.DOTALL
62
+ )
63
+ self.tool_call_parameter_regex = re.compile(
64
+ r"<parameter=(.*?)</parameter>|<parameter=(.*?)$", re.DOTALL
65
+ )
66
+
67
+ if not self.model_tokenizer:
68
+ raise ValueError(
69
+ "The model tokenizer must be passed to the ToolParser "
70
+ "constructor during construction."
71
+ )
72
+
73
+ self.tool_call_start_token_id = self.vocab.get(self.tool_call_start_token)
74
+ self.tool_call_end_token_id = self.vocab.get(self.tool_call_end_token)
75
+
76
+ if self.tool_call_start_token_id is None or self.tool_call_end_token_id is None:
77
+ raise RuntimeError(
78
+ "Qwen3 XML Tool parser could not locate tool call start/end "
79
+ "tokens in the tokenizer!"
80
+ )
81
+
82
+ logger.info(f"vLLM Successfully import tool parser {self.__class__.__name__} !")
83
+
84
+ def _generate_tool_call_id(self) -> str:
85
+ """Generate a unique tool call ID."""
86
+ return f"call_{uuid.uuid4().hex[:24]}"
87
+
88
+ def _reset_streaming_state(self):
89
+ """Reset all streaming state."""
90
+ self.current_tool_index = 0
91
+ self.is_tool_call_started = False
92
+ self.header_sent = False
93
+ self.current_tool_id = None
94
+ self.current_function_name = None
95
+ self.current_param_name = None
96
+ self.current_param_value = ""
97
+ self.param_count = 0
98
+ self.in_param = False
99
+ self.in_function = False
100
+ self.accumulated_text = ""
101
+ self.json_started = False
102
+ self.json_closed = False
103
+
104
+ def _parse_xml_function_call(
105
+ self, function_call_str: str, tools: Optional[list[ChatCompletionToolsParam]]
106
+ ) -> Optional[ToolCall]:
107
+ def get_arguments_config(func_name: str) -> dict:
108
+ if tools is None:
109
+ return {}
110
+ for config in tools:
111
+ if not hasattr(config, "type") or not (
112
+ hasattr(config, "function") and hasattr(config.function, "name")
113
+ ):
114
+ continue
115
+ if config.type == "function" and config.function.name == func_name:
116
+ if not hasattr(config.function, "parameters"):
117
+ return {}
118
+ params = config.function.parameters
119
+ if isinstance(params, dict) and "properties" in params:
120
+ return params["properties"]
121
+ elif isinstance(params, dict):
122
+ return params
123
+ else:
124
+ return {}
125
+ logger.warning(f"Tool '{func_name}' is not defined in the tools list.")
126
+ return {}
127
+
128
+ def convert_param_value(
129
+ param_value: str, param_name: str, param_config: dict, func_name: str
130
+ ) -> Any:
131
+ # Handle null value for any type
132
+ if param_value.lower() == "null":
133
+ return None
134
+
135
+ if param_name not in param_config:
136
+ if param_config != {}:
137
+ logger.warning(
138
+ f"Parsed parameter '{param_name}' is not defined in the tool "
139
+ f"parameters for tool '{func_name}', directly returning the string value."
140
+ )
141
+ return param_value
142
+
143
+ if (
144
+ isinstance(param_config[param_name], dict)
145
+ and "type" in param_config[param_name]
146
+ ):
147
+ param_type = str(param_config[param_name]["type"]).strip().lower()
148
+ else:
149
+ param_type = "string"
150
+ if param_type in ["string", "str", "text", "varchar", "char", "enum"]:
151
+ return param_value
152
+ elif (
153
+ param_type.startswith("int")
154
+ or param_type.startswith("uint")
155
+ or param_type.startswith("long")
156
+ or param_type.startswith("short")
157
+ or param_type.startswith("unsigned")
158
+ ):
159
+ try:
160
+ param_value = int(param_value)
161
+ except:
162
+ logger.warning(
163
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not an integer in tool "
164
+ f"'{func_name}', degenerating to string."
165
+ )
166
+ return param_value
167
+ elif param_type.startswith("num") or param_type.startswith("float"):
168
+ try:
169
+ float_param_value = float(param_value)
170
+ param_value = float_param_value if float_param_value - int(float_param_value) != 0 else int(float_param_value)
171
+ except:
172
+ logger.warning(
173
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not a float in tool "
174
+ f"'{func_name}', degenerating to string."
175
+ )
176
+ return param_value
177
+ elif param_type in ["boolean", "bool", "binary"]:
178
+ param_value = param_value.lower()
179
+ if param_value not in ["true", "false"]:
180
+ logger.warning(
181
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not a boolean (`true` of `false`) in tool '{func_name}', degenerating to false."
182
+ )
183
+ return param_value == "true"
184
+ else:
185
+ if param_type == "object" or param_type.startswith("dict"):
186
+ try:
187
+ param_value = json.loads(param_value)
188
+ return param_value
189
+ except:
190
+ logger.warning(
191
+ f"Parsed value '{param_value}' of parameter '{param_name}' is not a valid JSON object in tool "
192
+ f"'{func_name}', will try other methods to parse it."
193
+ )
194
+ try:
195
+ param_value = eval(param_value)
196
+ except:
197
+ logger.warning(
198
+ f"Parsed value '{param_value}' of parameter '{param_name}' cannot be converted via Python `eval()` in tool '{func_name}', degenerating to string."
199
+ )
200
+ return param_value
201
+
202
+ # Extract function name
203
+ end_index = function_call_str.index(">")
204
+ function_name = function_call_str[:end_index]
205
+ param_config = get_arguments_config(function_name)
206
+ parameters = function_call_str[end_index + 1 :]
207
+ param_dict = {}
208
+ for match in self.tool_call_parameter_regex.findall(parameters):
209
+ match_text = match[0] if match[0] else match[1]
210
+ idx = match_text.index(">")
211
+ param_name = match_text[:idx]
212
+ param_value = str(match_text[idx + 1 :])
213
+ # Remove prefix and trailing \n
214
+ if param_value.startswith("\n"):
215
+ param_value = param_value[1:]
216
+ if param_value.endswith("\n"):
217
+ param_value = param_value[:-1]
218
+
219
+ param_dict[param_name] = convert_param_value(
220
+ param_value, param_name, param_config, function_name
221
+ )
222
+ return ToolCall(
223
+ type="function",
224
+ function=FunctionCall(
225
+ name=function_name, arguments=json.dumps(param_dict, ensure_ascii=False)
226
+ ),
227
+ )
228
+
229
+ def _get_function_calls(self, model_output: str) -> List[str]:
230
+ # Find all tool calls
231
+ matched_ranges = self.tool_call_regex.findall(model_output)
232
+ raw_tool_calls = [
233
+ match[0] if match[0] else match[1] for match in matched_ranges
234
+ ]
235
+
236
+ # Back-off strategy if no tool_call tags found
237
+ if len(raw_tool_calls) == 0:
238
+ raw_tool_calls = [model_output]
239
+
240
+ raw_function_calls = []
241
+ for tool_call in raw_tool_calls:
242
+ raw_function_calls.extend(self.tool_call_function_regex.findall(tool_call))
243
+
244
+ function_calls = [
245
+ match[0] if match[0] else match[1] for match in raw_function_calls
246
+ ]
247
+ return function_calls
248
+
249
+ def extract_tool_calls(
250
+ self,
251
+ model_output: str,
252
+ request: ChatCompletionRequest,
253
+ ) -> ExtractedToolCallInformation:
254
+ # Quick check to avoid unnecessary processing
255
+ if self.tool_call_prefix not in model_output:
256
+ return ExtractedToolCallInformation(
257
+ tools_called=False, tool_calls=[], content=model_output
258
+ )
259
+
260
+ try:
261
+ function_calls = self._get_function_calls(model_output)
262
+ if len(function_calls) == 0:
263
+ return ExtractedToolCallInformation(
264
+ tools_called=False, tool_calls=[], content=model_output
265
+ )
266
+
267
+ tool_calls = [
268
+ self._parse_xml_function_call(function_call_str, request.tools)
269
+ for function_call_str in function_calls
270
+ ]
271
+
272
+ # Populate prev_tool_call_arr for serving layer to set finish_reason
273
+ self.prev_tool_call_arr.clear() # Clear previous calls
274
+ for tool_call in tool_calls:
275
+ if tool_call:
276
+ self.prev_tool_call_arr.append(
277
+ {
278
+ "name": tool_call.function.name,
279
+ "arguments": tool_call.function.arguments,
280
+ }
281
+ )
282
+
283
+ # Extract content before tool calls
284
+ content_index = model_output.find(self.tool_call_start_token)
285
+ content_index = (
286
+ content_index
287
+ if content_index >= 0
288
+ else model_output.find(self.tool_call_prefix)
289
+ )
290
+ content = model_output[:content_index] # .rstrip()
291
+
292
+ return ExtractedToolCallInformation(
293
+ tools_called=(len(tool_calls) > 0),
294
+ tool_calls=tool_calls,
295
+ content=content if content else None,
296
+ )
297
+
298
+ except Exception:
299
+ logger.exception("Error in extracting tool call from response.")
300
+ return ExtractedToolCallInformation(
301
+ tools_called=False, tool_calls=[], content=model_output
302
+ )
303
+
304
+ def extract_tool_calls_streaming(
305
+ self,
306
+ previous_text: str,
307
+ current_text: str,
308
+ delta_text: str,
309
+ previous_token_ids: Sequence[int],
310
+ current_token_ids: Sequence[int],
311
+ delta_token_ids: Sequence[int],
312
+ request: ChatCompletionRequest,
313
+ ) -> Union[DeltaMessage, None]:
314
+ # If no delta text, return None unless it's an EOS token after tool calls
315
+ if not delta_text:
316
+ # Check if this is an EOS token after all tool calls are complete
317
+ # We check for tool calls in the text even if is_tool_call_started is False
318
+ # because it might have been reset after processing all tools
319
+ if delta_token_ids and self.tool_call_end_token_id not in delta_token_ids:
320
+ # Count complete tool calls
321
+ complete_calls = len(
322
+ self.tool_call_complete_regex.findall(current_text)
323
+ )
324
+
325
+ # If we have completed tool calls and populated prev_tool_call_arr
326
+ if complete_calls > 0 and len(self.prev_tool_call_arr) > 0:
327
+ # Check if all tool calls are closed
328
+ open_calls = current_text.count(
329
+ self.tool_call_start_token
330
+ ) - current_text.count(self.tool_call_end_token)
331
+ if open_calls == 0:
332
+ # Return empty delta message to allow finish_reason processing
333
+ return DeltaMessage(content="")
334
+ elif not self.is_tool_call_started and current_text:
335
+ # This is a regular content response that's now complete
336
+ return DeltaMessage(content="")
337
+ return None
338
+
339
+ # Check if this is the first call (reset state if needed)
340
+ if not previous_text:
341
+ self._reset_streaming_state()
342
+
343
+ # Update accumulated text
344
+ self.accumulated_text = current_text
345
+
346
+ # Check if we need to advance to next tool
347
+ if self.json_closed and not self.in_function:
348
+ # Check if this tool call has ended
349
+ tool_ends = current_text.count(self.tool_call_end_token)
350
+ if tool_ends > self.current_tool_index:
351
+ # This tool has ended, advance to next
352
+ self.current_tool_index += 1
353
+ self.header_sent = False
354
+ self.param_count = 0
355
+ self.json_started = False
356
+ self.json_closed = False
357
+
358
+ # Check if there are more tool calls
359
+ tool_starts = current_text.count(self.tool_call_start_token)
360
+ if self.current_tool_index >= tool_starts:
361
+ # No more tool calls
362
+ self.is_tool_call_started = False
363
+ # Continue processing next tool
364
+ return None
365
+
366
+ # Handle normal content before tool calls
367
+ if not self.is_tool_call_started:
368
+ # Check if tool call is starting
369
+ if (
370
+ self.tool_call_start_token_id in delta_token_ids
371
+ or self.tool_call_start_token in delta_text
372
+ ):
373
+ self.is_tool_call_started = True
374
+ # Return any content before the tool call
375
+ if self.tool_call_start_token in delta_text:
376
+ content_before = delta_text[
377
+ : delta_text.index(self.tool_call_start_token)
378
+ ]
379
+ if content_before:
380
+ return DeltaMessage(content=content_before)
381
+ return None
382
+ else:
383
+ # Check if we're between tool calls - skip whitespace
384
+ if current_text.rstrip().endswith(self.tool_call_end_token):
385
+ # We just ended a tool call, skip whitespace
386
+ if delta_text.strip() == "":
387
+ return None
388
+ # Normal content, no tool call
389
+ return DeltaMessage(content=delta_text)
390
+
391
+ # Check if we're between tool calls (waiting for next one)
392
+ # Count tool calls we've seen vs processed
393
+ tool_starts_count = current_text.count(self.tool_call_start_token)
394
+ if self.current_tool_index >= tool_starts_count:
395
+ # We're past all tool calls, shouldn't be here
396
+ return None
397
+
398
+ # We're in a tool call, find the current tool call portion
399
+ # Need to find the correct tool call based on current_tool_index
400
+ tool_starts = []
401
+ idx = 0
402
+ while True:
403
+ idx = current_text.find(self.tool_call_start_token, idx)
404
+ if idx == -1:
405
+ break
406
+ tool_starts.append(idx)
407
+ idx += len(self.tool_call_start_token)
408
+
409
+ if self.current_tool_index >= len(tool_starts):
410
+ # No more tool calls to process yet
411
+ return None
412
+
413
+ tool_start_idx = tool_starts[self.current_tool_index]
414
+ # Find where this tool call ends (or current position if not ended yet)
415
+ tool_end_idx = current_text.find(self.tool_call_end_token, tool_start_idx)
416
+ if tool_end_idx == -1:
417
+ tool_text = current_text[tool_start_idx:]
418
+ else:
419
+ tool_text = current_text[
420
+ tool_start_idx : tool_end_idx + len(self.tool_call_end_token)
421
+ ]
422
+
423
+ # Looking for function header
424
+ if not self.header_sent:
425
+ if self.tool_call_prefix in tool_text:
426
+ func_start = tool_text.find(self.tool_call_prefix) + len(
427
+ self.tool_call_prefix
428
+ )
429
+ func_end = tool_text.find(">", func_start)
430
+
431
+ if func_end != -1:
432
+ # Found complete function name
433
+ self.current_function_name = tool_text[func_start:func_end]
434
+ self.current_tool_id = self._generate_tool_call_id()
435
+ self.header_sent = True
436
+ self.in_function = True
437
+
438
+ # IMPORTANT: Add to prev_tool_call_arr immediately when we detect a tool call
439
+ # This ensures finish_reason="tool_calls" even if parsing isn't complete
440
+ already_added = any(
441
+ tool.get("name") == self.current_function_name
442
+ for tool in self.prev_tool_call_arr
443
+ )
444
+ if not already_added:
445
+ self.prev_tool_call_arr.append(
446
+ {
447
+ "name": self.current_function_name,
448
+ "arguments": "{}", # Placeholder, will be updated later
449
+ }
450
+ )
451
+
452
+ # Send header with function info
453
+ return DeltaMessage(
454
+ tool_calls=[
455
+ DeltaToolCall(
456
+ index=self.current_tool_index,
457
+ id=self.current_tool_id,
458
+ function=DeltaFunctionCall(
459
+ name=self.current_function_name, arguments=""
460
+ ),
461
+ type="function",
462
+ )
463
+ ]
464
+ )
465
+ return None
466
+
467
+ # We've sent header, now handle function body
468
+ if self.in_function:
469
+ # Send opening brace if not sent yet
470
+ if not self.json_started and not self.parameter_prefix in delta_text:
471
+ self.json_started = True
472
+ return DeltaMessage(
473
+ tool_calls=[
474
+ DeltaToolCall(
475
+ index=self.current_tool_index,
476
+ function=DeltaFunctionCall(arguments="{"),
477
+ )
478
+ ]
479
+ )
480
+
481
+ # Make sure json_started is set if we're processing parameters
482
+ if not self.json_started:
483
+ self.json_started = True
484
+
485
+ # Check for function end in accumulated text
486
+ if not self.json_closed and self.function_end_token in tool_text:
487
+ # Close JSON
488
+ self.json_closed = True
489
+
490
+ # Extract the complete tool call to update prev_tool_call_arr with final arguments
491
+ # Find the function content
492
+ func_start = tool_text.find(self.tool_call_prefix) + len(
493
+ self.tool_call_prefix
494
+ )
495
+ func_content_end = tool_text.find(self.function_end_token, func_start)
496
+ if func_content_end != -1:
497
+ func_content = tool_text[func_start:func_content_end]
498
+ # Parse to get the complete arguments
499
+ try:
500
+ parsed_tool = self._parse_xml_function_call(
501
+ func_content, request.tools if request else None
502
+ )
503
+ if parsed_tool:
504
+ # Update existing entry in prev_tool_call_arr with complete arguments
505
+ for i, tool in enumerate(self.prev_tool_call_arr):
506
+ if tool.get("name") == parsed_tool.function.name:
507
+ self.prev_tool_call_arr[i]["arguments"] = (
508
+ parsed_tool.function.arguments
509
+ )
510
+ break
511
+ except Exception:
512
+ pass # Ignore parsing errors during streaming
513
+
514
+ result = DeltaMessage(
515
+ tool_calls=[
516
+ DeltaToolCall(
517
+ index=self.current_tool_index,
518
+ function=DeltaFunctionCall(arguments="}"),
519
+ )
520
+ ]
521
+ )
522
+
523
+ # Reset state for next tool
524
+ self.in_function = False
525
+ self.json_closed = True
526
+
527
+ return result
528
+
529
+ # Look for parameters
530
+ # Count how many complete parameters we have processed
531
+ complete_params = tool_text.count(self.parameter_end_token)
532
+
533
+ # Check if we should start a new parameter
534
+ if not self.in_param and self.param_count < complete_params:
535
+ # Find the unprocessed parameter
536
+ # Count parameter starts
537
+ param_starts = []
538
+ idx = 0
539
+ while True:
540
+ idx = tool_text.find(self.parameter_prefix, idx)
541
+ if idx == -1:
542
+ break
543
+ param_starts.append(idx)
544
+ idx += len(self.parameter_prefix)
545
+
546
+ if len(param_starts) > self.param_count:
547
+ # Process the next parameter
548
+ param_idx = param_starts[self.param_count]
549
+ param_start = param_idx + len(self.parameter_prefix)
550
+ remaining = tool_text[param_start:]
551
+
552
+ if ">" in remaining:
553
+ # We have the complete parameter name
554
+ name_end = remaining.find(">")
555
+ self.current_param_name = remaining[:name_end]
556
+
557
+ # Find the parameter value
558
+ value_start = param_start + name_end + 1
559
+ value_text = tool_text[value_start:]
560
+ if value_text.startswith("\n"):
561
+ value_text = value_text[1:]
562
+
563
+ # Find where this parameter ends
564
+ param_end_idx = value_text.find(self.parameter_end_token)
565
+ if param_end_idx != -1:
566
+ # Complete parameter found
567
+ param_value = value_text[:param_end_idx]
568
+ if param_value.endswith("\n"):
569
+ param_value = param_value[:-1]
570
+
571
+ # Build complete JSON fragment for this parameter
572
+ if self.param_count == 0:
573
+ json_fragment = (
574
+ '"'
575
+ + self.current_param_name
576
+ + '": "'
577
+ + json.dumps(param_value)[1:-1]
578
+ + '"'
579
+ )
580
+ else:
581
+ json_fragment = (
582
+ ', "'
583
+ + self.current_param_name
584
+ + '": "'
585
+ + json.dumps(param_value)[1:-1]
586
+ + '"'
587
+ )
588
+
589
+ self.param_count += 1
590
+
591
+ return DeltaMessage(
592
+ tool_calls=[
593
+ DeltaToolCall(
594
+ index=self.current_tool_index,
595
+ function=DeltaFunctionCall(
596
+ arguments=json_fragment
597
+ ),
598
+ )
599
+ ]
600
+ )
601
+
602
+ # Continue parameter value
603
+ if self.in_param:
604
+ if self.parameter_end_token in delta_text:
605
+ # End of parameter
606
+ end_idx = delta_text.find(self.parameter_end_token)
607
+ value_chunk = delta_text[:end_idx]
608
+
609
+ # Skip past > if at start
610
+ if not self.current_param_value and ">" in value_chunk:
611
+ gt_idx = value_chunk.find(">")
612
+ value_chunk = value_chunk[gt_idx + 1 :]
613
+
614
+ if not self.current_param_value and value_chunk.startswith("\n"):
615
+ value_chunk = value_chunk[1:]
616
+
617
+ # Calculate incremental JSON
618
+ full_value = self.current_param_value + value_chunk
619
+ prev_escaped = (
620
+ json.dumps(self.current_param_value)[1:-1]
621
+ if self.current_param_value
622
+ else ""
623
+ )
624
+ full_escaped = json.dumps(full_value)[1:-1]
625
+ delta_escaped = full_escaped[len(prev_escaped) :]
626
+
627
+ self.in_param = False
628
+ self.current_param_value = ""
629
+
630
+ return DeltaMessage(
631
+ tool_calls=[
632
+ DeltaToolCall(
633
+ index=self.current_tool_index,
634
+ function=DeltaFunctionCall(
635
+ arguments=delta_escaped + '"'
636
+ ),
637
+ )
638
+ ]
639
+ )
640
+ else:
641
+ # Continue accumulating value
642
+ value_chunk = delta_text
643
+
644
+ # Handle first chunk after param name
645
+ if not self.current_param_value and ">" in value_chunk:
646
+ gt_idx = value_chunk.find(">")
647
+ value_chunk = value_chunk[gt_idx + 1 :]
648
+
649
+ if not self.current_param_value and value_chunk.startswith("\n"):
650
+ value_chunk = value_chunk[1:]
651
+
652
+ if value_chunk:
653
+ # Stream the escaped delta
654
+ prev_escaped = (
655
+ json.dumps(self.current_param_value)[1:-1]
656
+ if self.current_param_value
657
+ else ""
658
+ )
659
+ self.current_param_value += value_chunk
660
+ full_escaped = json.dumps(self.current_param_value)[1:-1]
661
+ delta_escaped = full_escaped[len(prev_escaped) :]
662
+
663
+ if delta_escaped:
664
+ return DeltaMessage(
665
+ tool_calls=[
666
+ DeltaToolCall(
667
+ index=self.current_tool_index,
668
+ function=DeltaFunctionCall(
669
+ arguments=delta_escaped
670
+ ),
671
+ )
672
+ ]
673
+ )
674
+
675
+ return None
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 1048576,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff