(Trained with Unsloth)
Browse files- chat_template.jinja +92 -0
- config.json +104 -0
- processor_config.json +39 -0
- tokenizer.json +0 -0
- tokenizer_config.json +29 -0
chat_template.jinja
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{#- LFM2-VL chat template: renders messages into <|im_start|>/<|im_end|> turns,
    with optional system prompt, tool definitions, tool calls, and <think> blocks. -#}
{{- bos_token -}}
{#- When false (default), <think> reasoning is dropped from all past assistant
    turns and kept only on the last one. -#}
{%- set keep_past_thinking = keep_past_thinking | default(false) -%}

{#- Render a single tool-call argument value: strings are double-quoted,
    mappings are JSON-serialized, everything else is stringified. -#}
{%- macro format_arg_value(arg_value) -%}
    {%- if arg_value is string -%}
        {{- '"' + arg_value + '"' -}}
    {%- elif arg_value is mapping -%}
        {{- arg_value | tojson -}}
    {%- else -%}
        {{- arg_value | string -}}
    {%- endif -%}
{%- endmacro -%}

{#- Flatten message content to text. Plain strings pass through; lists of
    content parts map "image" items to the <image> placeholder, "text" items to
    their text, and unknown item types to their JSON form. -#}
{%- macro parse_content(content) -%}
    {%- if content is string -%}
        {{- content -}}
    {%- else -%}
        {%- set _ns = namespace(result="") -%}
        {%- for item in content -%}
            {%- if item.type == "image" -%}
                {%- set _ns.result = _ns.result + "<image>" -%}
            {%- elif item.type == "text" -%}
                {%- set _ns.result = _ns.result + item.text -%}
            {%- else -%}
                {#- Jinja filter precedence: tojson applies to item only. -#}
                {%- set _ns.result = _ns.result + item | tojson -%}
            {%- endif -%}
        {%- endfor -%}
        {{- _ns.result -}}
    {%- endif -%}
{%- endmacro -%}

{#- Render tool calls as a Python-call-style list, e.g.
    <|tool_call_start|>[get_weather(city="Paris")]<|tool_call_end|> -#}
{%- macro render_tool_calls(tool_calls) -%}
    {%- set tool_calls_ns = namespace(tool_calls=[]) -%}
    {%- for tool_call in tool_calls -%}
        {%- set func_name = tool_call.function.name -%}
        {%- set func_args = tool_call.function.arguments -%}
        {%- set args_ns = namespace(arg_strings=[]) -%}
        {%- for arg_name, arg_value in func_args.items() -%}
            {%- set args_ns.arg_strings = args_ns.arg_strings + [arg_name + "=" + format_arg_value(arg_value)] -%}
        {%- endfor -%}
        {%- set tool_calls_ns.tool_calls = tool_calls_ns.tool_calls + [func_name + "(" + (args_ns.arg_strings | join(", ")) + ")"] -%}
    {%- endfor -%}
    {{- "<|tool_call_start|>[" + (tool_calls_ns.tool_calls | join(", ")) + "]<|tool_call_end|>" -}}
{%- endmacro -%}

{#- Pull a leading system message (if any) out of the message list and merge
    tool definitions into the system prompt. -#}
{%- set ns = namespace(system_prompt="", last_assistant_index=-1) -%}
{%- if messages[0].role == "system" -%}
    {%- if messages[0].content is defined -%}
        {%- set ns.system_prompt = parse_content(messages[0].content) -%}
    {%- endif -%}
    {%- set messages = messages[1:] -%}
{%- endif -%}
{%- if tools -%}
    {%- set ns.system_prompt = ns.system_prompt + ("\n\n" if ns.system_prompt else "") + "Today's date: " + strftime_now("%Y-%m-%d") + "\n\nList of tools: " + (tools | tojson) -%}
{%- endif -%}
{%- if ns.system_prompt -%}
    {{- "<|im_start|>system\n" + ns.system_prompt + "<|im_end|>\n" -}}
{%- endif -%}
{#- First pass: find the index of the last assistant message so that thinking
    can be kept only on that turn when keep_past_thinking is false. -#}
{%- for message in messages -%}
    {%- if message.role == "assistant" -%}
        {%- set ns.last_assistant_index = loop.index0 -%}
    {%- endif -%}
{%- endfor -%}
{#- Second pass: render each turn. Assistant turns are wrapped in
    {% generation %} markers for training-time loss masking. -#}
{%- for message in messages -%}
    {{- "<|im_start|>" + message.role + "\n" -}}
    {%- if message.role == "assistant" -%}
        {%- generation -%}
        {%- if message.thinking is defined and (keep_past_thinking or loop.index0 == ns.last_assistant_index) -%}
            {{- "<think>" + message.thinking + "</think>" -}}
        {%- endif -%}
        {%- if message.tool_calls is defined -%}
            {{- render_tool_calls(message.tool_calls) -}}
        {%- endif -%}
        {%- if message.content is defined -%}
            {%- set content = parse_content(message.content) -%}
            {#- Strip inline </think> reasoning embedded in past assistant
                content when past thinking is not kept. -#}
            {%- if not keep_past_thinking and loop.index0 != ns.last_assistant_index -%}
                {%- if "</think>" in content -%}
                    {%- set content = content.split("</think>")[-1] | trim -%}
                {%- endif -%}
            {%- endif -%}
            {#- Leave the final turn open when continue_final_message is set. -#}
            {{- content + ("" if (continue_final_message and loop.last) else "<|im_end|>\n") -}}
        {%- endif -%}
        {%- endgeneration -%}
    {%- else %}
        {%- if message.content is defined -%}
            {{- parse_content(message.content) + "<|im_end|>\n" -}}
        {%- endif -%}
    {%- endif %}
{%- endfor -%}
{%- if add_generation_prompt -%}
    {{- "<|im_start|>assistant\n" -}}
{%- endif -%}
config.json
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"Lfm2VlForConditionalGeneration"
|
| 4 |
+
],
|
| 5 |
+
"do_image_splitting": true,
|
| 6 |
+
"downsample_factor": 2,
|
| 7 |
+
"torch_dtype": "float16",
|
| 8 |
+
"encoder_patch_size": 16,
|
| 9 |
+
"image_token_id": 396,
|
| 10 |
+
"max_image_tokens": 256,
|
| 11 |
+
"max_pixels_tolerance": 2.0,
|
| 12 |
+
"max_tiles": 10,
|
| 13 |
+
"min_image_tokens": 64,
|
| 14 |
+
"min_tiles": 2,
|
| 15 |
+
"model_name": "LiquidAI/LFM2.5-VL-450M",
|
| 16 |
+
"model_type": "lfm2_vl",
|
| 17 |
+
"pad_token_id": 0,
|
| 18 |
+
"projector_bias": true,
|
| 19 |
+
"projector_hidden_act": "gelu",
|
| 20 |
+
"projector_hidden_size": 2048,
|
| 21 |
+
"projector_use_layernorm": false,
|
| 22 |
+
"text_config": {
|
| 23 |
+
"_name_or_path": "LiquidAI/LFM2-350M",
|
| 24 |
+
"architectures": [
|
| 25 |
+
"Lfm2ForCausalLM"
|
| 26 |
+
],
|
| 27 |
+
"block_auto_adjust_ff_dim": true,
|
| 28 |
+
"block_dim": 1024,
|
| 29 |
+
"block_ffn_dim_multiplier": 1.0,
|
| 30 |
+
"block_mlp_init_scale": 1.0,
|
| 31 |
+
"block_multiple_of": 256,
|
| 32 |
+
"block_norm_eps": 1e-05,
|
| 33 |
+
"block_out_init_scale": 1.0,
|
| 34 |
+
"block_use_swiglu": true,
|
| 35 |
+
"block_use_xavier_init": true,
|
| 36 |
+
"bos_token_id": 1,
|
| 37 |
+
"conv_L_cache": 3,
|
| 38 |
+
"conv_bias": false,
|
| 39 |
+
"conv_dim": 1024,
|
| 40 |
+
"conv_dim_out": 1024,
|
| 41 |
+
"conv_use_xavier_init": true,
|
| 42 |
+
"torch_dtype": "float16",
|
| 43 |
+
"eos_token_id": 7,
|
| 44 |
+
"full_attn_idxs": null,
|
| 45 |
+
"hidden_size": 1024,
|
| 46 |
+
"initializer_range": 0.02,
|
| 47 |
+
"intermediate_size": 6656,
|
| 48 |
+
"layer_types": [
|
| 49 |
+
"conv",
|
| 50 |
+
"conv",
|
| 51 |
+
"full_attention",
|
| 52 |
+
"conv",
|
| 53 |
+
"conv",
|
| 54 |
+
"full_attention",
|
| 55 |
+
"conv",
|
| 56 |
+
"conv",
|
| 57 |
+
"full_attention",
|
| 58 |
+
"conv",
|
| 59 |
+
"full_attention",
|
| 60 |
+
"conv",
|
| 61 |
+
"full_attention",
|
| 62 |
+
"conv",
|
| 63 |
+
"full_attention",
|
| 64 |
+
"conv"
|
| 65 |
+
],
|
| 66 |
+
"max_position_embeddings": 128000,
|
| 67 |
+
"model_type": "lfm2",
|
| 68 |
+
"norm_eps": 1e-05,
|
| 69 |
+
"num_attention_heads": 16,
|
| 70 |
+
"num_heads": 16,
|
| 71 |
+
"num_hidden_layers": 16,
|
| 72 |
+
"num_key_value_heads": 8,
|
| 73 |
+
"pad_token_id": 0,
|
| 74 |
+
"rope_parameters": {
|
| 75 |
+
"rope_theta": 1000000.0,
|
| 76 |
+
"rope_type": "default"
|
| 77 |
+
},
|
| 78 |
+
"tie_word_embeddings": true,
|
| 79 |
+
"use_cache": true,
|
| 80 |
+
"use_pos_enc": true,
|
| 81 |
+
"vocab_size": 65536
|
| 82 |
+
},
|
| 83 |
+
"tie_word_embeddings": true,
|
| 84 |
+
"tile_size": 512,
|
| 85 |
+
"unsloth_version": "2026.4.4",
|
| 86 |
+
"use_cache": false,
|
| 87 |
+
"use_image_special_tokens": true,
|
| 88 |
+
"use_thumbnail": true,
|
| 89 |
+
"vision_config": {
|
| 90 |
+
"attention_dropout": 0.0,
|
| 91 |
+
"torch_dtype": "float16",
|
| 92 |
+
"hidden_act": "gelu_pytorch_tanh",
|
| 93 |
+
"hidden_size": 768,
|
| 94 |
+
"intermediate_size": 3072,
|
| 95 |
+
"layer_norm_eps": 1e-06,
|
| 96 |
+
"model_type": "siglip2_vision_model",
|
| 97 |
+
"num_attention_heads": 12,
|
| 98 |
+
"num_channels": 3,
|
| 99 |
+
"num_hidden_layers": 12,
|
| 100 |
+
"num_patches": 256,
|
| 101 |
+
"patch_size": 16,
|
| 102 |
+
"vision_use_head": false
|
| 103 |
+
}
|
| 104 |
+
}
|
processor_config.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"image_processor": {
|
| 3 |
+
"data_format": "channels_first",
|
| 4 |
+
"do_image_splitting": true,
|
| 5 |
+
"do_normalize": true,
|
| 6 |
+
"do_pad": true,
|
| 7 |
+
"do_rescale": true,
|
| 8 |
+
"do_resize": true,
|
| 9 |
+
"downsample_factor": 2,
|
| 10 |
+
"encoder_patch_size": 16,
|
| 11 |
+
"image_mean": [
|
| 12 |
+
0.5,
|
| 13 |
+
0.5,
|
| 14 |
+
0.5
|
| 15 |
+
],
|
| 16 |
+
"image_processor_type": "Lfm2VlImageProcessor",
|
| 17 |
+
"image_std": [
|
| 18 |
+
0.5,
|
| 19 |
+
0.5,
|
| 20 |
+
0.5
|
| 21 |
+
],
|
| 22 |
+
"max_image_tokens": 256,
|
| 23 |
+
"max_num_patches": 1024,
|
| 24 |
+
"max_pixels_tolerance": 2.0,
|
| 25 |
+
"max_tiles": 10,
|
| 26 |
+
"min_image_tokens": 64,
|
| 27 |
+
"min_tiles": 2,
|
| 28 |
+
"resample": 2,
|
| 29 |
+
"rescale_factor": 0.00392156862745098,
|
| 30 |
+
"return_row_col_info": true,
|
| 31 |
+
"size": {
|
| 32 |
+
"height": 512,
|
| 33 |
+
"width": 512
|
| 34 |
+
},
|
| 35 |
+
"tile_size": 512,
|
| 36 |
+
"use_thumbnail": true
|
| 37 |
+
},
|
| 38 |
+
"processor_class": "Lfm2VlProcessor"
|
| 39 |
+
}
|
tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"backend": "tokenizers",
|
| 3 |
+
"bos_token": "<|startoftext|>",
|
| 4 |
+
"clean_up_tokenization_spaces": true,
|
| 5 |
+
"eos_token": "<|im_end|>",
|
| 6 |
+
"extra_special_tokens": [],
|
| 7 |
+
"image_end_token": "<|image_end|>",
|
| 8 |
+
"image_start_token": "<|image_start|>",
|
| 9 |
+
"image_thumbnail": "<|img_thumbnail|>",
|
| 10 |
+
"image_token": "<image>",
|
| 11 |
+
"is_local": false,
|
| 12 |
+
"legacy": false,
|
| 13 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 14 |
+
"model_specific_special_tokens": {
|
| 15 |
+
"image_end_token": "<|image_end|>",
|
| 16 |
+
"image_start_token": "<|image_start|>",
|
| 17 |
+
"image_token": "<image>"
|
| 18 |
+
},
|
| 19 |
+
"pad_token": "<|pad|>",
|
| 20 |
+
"padding_side": "left",
|
| 21 |
+
"processor_class": "Lfm2VlProcessor",
|
| 22 |
+
"return_token_type_ids": false,
|
| 23 |
+
"sp_model_kwargs": {},
|
| 24 |
+
"spaces_between_special_tokens": false,
|
| 25 |
+
"tokenizer_class": "TokenizersBackend",
|
| 26 |
+
"use_default_system_prompt": false,
|
| 27 |
+
"use_fast": true,
|
| 28 |
+
"chat_template": "{{- bos_token -}}\n{%- set keep_past_thinking = keep_past_thinking | default(false) -%}\n\n{%- macro format_arg_value(arg_value) -%}\n {%- if arg_value is string -%}\n {{- '\"' + arg_value + '\"' -}}\n {%- elif arg_value is mapping -%}\n {{- arg_value | tojson -}}\n {%- else -%}\n {{- arg_value | string -}}\n {%- endif -%}\n{%- endmacro -%}\n\n{%- macro parse_content(content) -%}\n {%- if content is string -%}\n {{- content -}}\n {%- else -%}\n {%- set _ns = namespace(result=\"\") -%}\n {%- for item in content -%}\n {%- if item.type == \"image\" -%}\n {%- set _ns.result = _ns.result + \"<image>\" -%}\n {%- elif item.type == \"text\" -%}\n {%- set _ns.result = _ns.result + item.text -%}\n {%- else -%}\n {%- set _ns.result = _ns.result + item | tojson -%}\n {%- endif -%}\n {%- endfor -%}\n {{- _ns.result -}}\n {%- endif -%}\n{%- endmacro -%}\n\n{%- macro render_tool_calls(tool_calls) -%}\n {%- set tool_calls_ns = namespace(tool_calls=[]) -%}\n {%- for tool_call in tool_calls -%}\n {%- set func_name = tool_call.function.name -%}\n {%- set func_args = tool_call.function.arguments -%}\n {%- set args_ns = namespace(arg_strings=[]) -%}\n {%- for arg_name, arg_value in func_args.items() -%}\n {%- set args_ns.arg_strings = args_ns.arg_strings + [arg_name + \"=\" + format_arg_value(arg_value)] -%}\n {%- endfor -%}\n {%- set tool_calls_ns.tool_calls = tool_calls_ns.tool_calls + [func_name + \"(\" + (args_ns.arg_strings | join(\", \")) + \")\"] -%}\n {%- endfor -%}\n {{- \"<|tool_call_start|>[\" + (tool_calls_ns.tool_calls | join(\", \")) + \"]<|tool_call_end|>\" -}}\n{%- endmacro -%}\n\n{%- set ns = namespace(system_prompt=\"\", last_assistant_index=-1) -%}\n{%- if messages[0].role == \"system\" -%}\n {%- if messages[0].content is defined -%}\n {%- set ns.system_prompt = parse_content(messages[0].content) -%}\n {%- endif -%}\n {%- set messages = messages[1:] -%}\n{%- endif -%}\n{%- if tools -%}\n {%- set ns.system_prompt = ns.system_prompt + (\"\\n\\n\" if 
ns.system_prompt else \"\") + \"Today's date: \" + strftime_now(\"%Y-%m-%d\") + \"\\n\\nList of tools: \" + (tools | tojson) -%}\n{%- endif -%}\n{%- if ns.system_prompt -%}\n {{- \"<|im_start|>system\\n\" + ns.system_prompt + \"<|im_end|>\\n\" -}}\n{%- endif -%}\n{%- for message in messages -%}\n {%- if message.role == \"assistant\" -%}\n {%- set ns.last_assistant_index = loop.index0 -%}\n {%- endif -%}\n{%- endfor -%}\n{%- for message in messages -%}\n {{- \"<|im_start|>\" + message.role + \"\\n\" -}}\n {%- if message.role == \"assistant\" -%}\n {%- generation -%}\n {%- if message.thinking is defined and (keep_past_thinking or loop.index0 == ns.last_assistant_index) -%}\n {{- \"<think>\" + message.thinking + \"</think>\" -}}\n {%- endif -%}\n {%- if message.tool_calls is defined -%}\n {{- render_tool_calls(message.tool_calls) -}}\n {%- endif -%}\n {%- if message.content is defined -%}\n {%- set content = parse_content(message.content) -%}\n {%- if not keep_past_thinking and loop.index0 != ns.last_assistant_index -%}\n {%- if \"</think>\" in content -%}\n {%- set content = content.split(\"</think>\")[-1] | trim -%}\n {%- endif -%}\n {%- endif -%}\n {{- content + (\"\" if (continue_final_message and loop.last) else \"<|im_end|>\\n\") -}}\n {%- endif -%}\n {%- endgeneration -%}\n {%- else %}\n {%- if message.content is defined -%}\n {{- parse_content(message.content) + \"<|im_end|>\\n\" -}}\n {%- endif -%}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{- \"<|im_start|>assistant\\n\" -}}\n{%- endif -%}"
|
| 29 |
+
}
|