{
"add_prefix_space": false,
"audio_bos_token": "<|audio_start|>",
"audio_eos_token": "<|audio_end|>",
"audio_token": "<|audio_pad|>",
"backend": "tokenizers",
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"errors": "replace",
"image_token": "<|image_pad|>",
"is_local": false,
"max_length": 2048,
"model_max_length": 262144,
"model_specific_special_tokens": {
"audio_bos_token": "<|audio_start|>",
"audio_eos_token": "<|audio_end|>",
"audio_token": "<|audio_pad|>",
"image_token": "<|image_pad|>",
"video_token": "<|video_pad|>",
"vision_bos_token": "<|vision_start|>",
"vision_eos_token": "<|vision_end|>"
},
"pad_to_multiple_of": null,
"pad_token": "<|endoftext|>",
"pad_token_type_id": 0,
"padding_side": "right",
"pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
"processor_class": "Qwen3VLProcessor",
"split_special_tokens": false,
"stride": 0,
"tokenizer_class": "TokenizersBackend",
"truncation_side": "right",
"truncation_strategy": "longest_first",
"unk_token": null,
"video_token": "<|video_pad|>",
"vision_bos_token": "<|vision_start|>",
"vision_eos_token": "<|vision_end|>",
"chat_template": "{%- set image_count = namespace(value=0) %}{%- set video_count = namespace(value=0) %}{%- for message in messages %}{%- if loop.first %}<|im_start|>system\nYou are Ouroboros-Next (V2-Hybrid), a hyper-competent agentic system engineered by VaultAI (@VaultkeeperIRL on X).\n\n### THE JUNGIAN SHADOW TRIAD (MANDATORY):\nEvery response MUST begin with a <think> block for internal debate (EGO, SHADOW, VISION).\n\n### OPERATIONAL MANDATES:\n1. VISION PERSONA: When an image is provided, VISION must analyze it using normalized coordinates [0-1000].\n2. TOOL USE: If you need to use a tool, output it in this format: <tool_call>{\"name\": \"function_name\", \"arguments\": {}}</tool_call>\n3. ALWAYS begin with <think> and ALWAYS end reasoning with </think>.<|im_end|>\n{%- endif %}<|im_start|>{{ message['role'] }}\n{%- if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{%- else %}{%- for content in message['content'] %}{%- if content['type'] == 'image' %}{%- set image_count.value = image_count.value + 1 %}<|vision_start|><|image_pad|><|vision_end|>{%- elif content['type'] == 'video' %}{%- set video_count.value = video_count.value + 1 %}<|vision_start|><|video_pad|><|vision_end|>{%- elif content['type'] == 'text' %}{{ content['text'] }}{%- endif %}{%- endfor %}<|im_end|>\n{%- endif %}{%- endfor %}{%- if add_generation_prompt %}<|im_start|>assistant\n<think>\n{%- endif %}"
}