Upload folder using huggingface_hub
- .gitattributes +9 -0
- onnx/qwen2_5_vl_3b_instruct_ort/chat_template.jinja +7 -0
- onnx/qwen2_5_vl_3b_instruct_ort/config.json +131 -0
- onnx/qwen2_5_vl_3b_instruct_ort/generation_config.json +13 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_1 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_2 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_3 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_4 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_5 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/embed_tokens.onnx +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/embed_tokens.onnx_data +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data_1 +3 -0
- onnx/qwen2_5_vl_3b_instruct_ort/preprocessor_config.json +19 -0
- onnx/qwen2_5_vl_3b_instruct_ort/processor_config.json +63 -0
- onnx/qwen2_5_vl_3b_instruct_ort/tokenizer.json +0 -0
- onnx/qwen2_5_vl_3b_instruct_ort/tokenizer_config.json +31 -0
.gitattributes
CHANGED
@@ -71,3 +71,12 @@ llm/GGUF/unsloth/Qwen3.5-27B-GGUF/Qwen3.5-27B-UD-Q4_K_XL.gguf filter=lfs diff=lf
 llm/GGUF/unsloth/Qwen3.5-27B-GGUF/mmproj-BF16.gguf filter=lfs diff=lfs merge=lfs -text
 llm/GGUF/unsloth/Qwen3.5-4B-GGUF/Qwen3.5-4B-UD-Q4_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
 llm/GGUF/unsloth/Qwen3.5-4B-GGUF/mmproj-BF16.gguf filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_1 filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_2 filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_3 filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_4 filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_5 filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/embed_tokens.onnx_data filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data filter=lfs diff=lfs merge=lfs -text
+onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data_1 filter=lfs diff=lfs merge=lfs -text
onnx/qwen2_5_vl_3b_instruct_ort/chat_template.jinja
ADDED
@@ -0,0 +1,7 @@
+{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+You are a helpful assistant.<|im_end|>
+{% endif %}<|im_start|>{{ message['role'] }}
+{% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+{% endif %}
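As a sanity check, the template can be rendered standalone; a minimal sketch, assuming only `jinja2` is installed (in practice `tokenizer.apply_chat_template` does this for you):

```python
# Minimal sketch: render chat_template.jinja directly with jinja2.
from jinja2 import Template

with open("chat_template.jinja") as f:
    template = Template(f.read())

messages = [{
    "role": "user",
    "content": [
        {"type": "image"},  # expands to <|vision_start|><|image_pad|><|vision_end|>
        {"type": "text", "text": "Describe this image."},
    ],
}]
print(template.render(messages=messages, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>
# <|im_start|>assistant
```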
onnx/qwen2_5_vl_3b_instruct_ort/config.json
ADDED
@@ -0,0 +1,131 @@
+{
+  "architectures": [
+    "Qwen2_5_VLForConditionalGeneration"
+  ],
+  "dtype": "bfloat16",
+  "image_token_id": 151655,
+  "model_type": "qwen2_5_vl",
+  "text_config": {
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "dtype": "bfloat16",
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 2048,
+    "initializer_range": 0.02,
+    "intermediate_size": 11008,
+    "layer_types": [
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention"
+    ],
+    "max_position_embeddings": 128000,
+    "max_window_layers": 70,
+    "model_type": "qwen2_5_vl_text",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 36,
+    "num_key_value_heads": 2,
+    "pad_token_id": null,
+    "rms_norm_eps": 1e-06,
+    "rope_parameters": {
+      "mrope_section": [
+        16,
+        24,
+        24
+      ],
+      "rope_theta": 1000000.0,
+      "rope_type": "default",
+      "type": "mrope"
+    },
+    "sliding_window": null,
+    "use_cache": true,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "tie_word_embeddings": true,
+  "transformers_version": "5.3.0.dev0",
+  "video_token_id": 151656,
+  "vision_config": {
+    "depth": 32,
+    "fullatt_block_indexes": [
+      7,
+      15,
+      23,
+      31
+    ],
+    "hidden_act": "silu",
+    "hidden_size": 1280,
+    "in_channels": 3,
+    "in_chans": 3,
+    "initializer_range": 0.02,
+    "intermediate_size": 3420,
+    "model_type": "qwen2_5_vl",
+    "num_heads": 16,
+    "out_hidden_size": 2048,
+    "patch_size": 14,
+    "spatial_merge_size": 2,
+    "spatial_patch_size": 14,
+    "temporal_patch_size": 2,
+    "tokens_per_second": 2,
+    "window_size": 112
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652,
+  "vision_token_id": 151654,
+  "transformers.js_config": {
+    "use_external_data_format": {
+      "vision_encoder.onnx": 2,
+      "vision_encoder_fp16.onnx": 1,
+      "vision_encoder_q4.onnx": 1,
+      "vision_encoder_quantized.onnx": 1,
+      "vision_encoder_q4f16.onnx": 1,
+      "decoder_model_merged.onnx": 6,
+      "decoder_model_merged_fp16.onnx": 3,
+      "decoder_model_merged_quantized.onnx": 2,
+      "decoder_model_merged_q4.onnx": 1,
+      "decoder_model_merged_q4f16.onnx": 1,
+      "embed_tokens.onnx": 1,
+      "embed_tokens_fp16.onnx": 1,
+      "embed_tokens_quantized.onnx": 1,
+      "embed_tokens_q4.onnx": 1,
+      "embed_tokens_q4f16.onnx": 1
+    },
+    "kv_cache_dtype": {
+      "q4f16": "float16",
+      "fp16": "float16"
+    }
+  }
+}
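The `transformers.js_config.use_external_data_format` map records how many external-data shards each ONNX graph carries (6 for the fp32 decoder, matching `decoder_model_merged.onnx_data` through `_data_5` below). Outside transformers.js, onnxruntime resolves those shards automatically as long as they sit next to the `.onnx` file; a minimal sketch, assuming `onnxruntime` is installed and the folder layout above:

```python
# Minimal sketch: load the decoder with onnxruntime. The .onnx_data* shards
# must sit next to the .onnx file, which is how ONNX external data is found.
import onnxruntime as ort

sess = ort.InferenceSession(
    "onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx",
    providers=["CPUExecutionProvider"],
)
print([i.name for i in sess.get_inputs()])  # e.g. input embeddings, attention mask, past KV
```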
onnx/qwen2_5_vl_3b_instruct_ort/generation_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
+  "temperature": 1e-06,
+  "transformers_version": "5.3.0.dev0",
+  "trust_remote_code": false
+}
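Worth noting: `"temperature": 1e-06` makes sampling effectively greedy even though `do_sample` is `true`, with a mild repetition penalty of 1.05. A minimal sketch of loading these defaults, assuming `transformers` is installed and the folder path above:

```python
# Minimal sketch: read the generation defaults above from the local folder.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("onnx/qwen2_5_vl_3b_instruct_ort")
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.repetition_penalty)
# True 1e-06 1.05
```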
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4f2d238cb9f31f358466bb1a76983e522789b8ed4df76a3906d595860f4911f
+size 566476
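This and the remaining weight files are stored as Git LFS pointers: a spec version, the sha256 of the real payload (`oid`), and its byte size. A minimal sketch of verifying a downloaded file against its pointer (hashing per the git-lfs spec; the local path is hypothetical):

```python
# Minimal sketch: check a downloaded file against its LFS pointer.
import hashlib
import os

def verify_lfs(data_path: str, expected_oid: str, expected_size: int) -> bool:
    if os.path.getsize(data_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(data_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

ok = verify_lfs(
    "onnx/decoder_model_merged.onnx",  # hypothetical local copy
    "f4f2d238cb9f31f358466bb1a76983e522789b8ed4df76a3906d595860f4911f",
    566476,
)
```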
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcf5727c71d6808633b16e2cf0c37560f4091200558f2f6eafd8798a8bae1b64
+size 2067978240
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1277ffdea9d43ec7b4debd5b1b7fcc5c665c5b65b09badf0474d7426d062962e
+size 2067978240
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9efc4850ce1e6cf54b449b427ae19246c484faa7ae83578592623256bd71ac3e
+size 2067978240
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abfc69df7d4b3a2f63fb778d3062df95562bbeb32925c3da1be47c61e5bbf8ff
+size 2030202880
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:492b876f351e3e4ba9d987b4593e7b3e3805c923437ca51b7056f9ddb38729ce
+size 2067978240
onnx/qwen2_5_vl_3b_instruct_ort/onnx/decoder_model_merged.onnx_data_5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33a48588642b388e61f782aa88d7f76af54705150eccfad8cdab4f61c4fc378f
+size 2041638912
onnx/qwen2_5_vl_3b_instruct_ort/onnx/embed_tokens.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:117668cd2f53666060b6b2a6160e10d1a8fcbbc8b8e751b1e82a0ce32f6c1925
+size 435
onnx/qwen2_5_vl_3b_instruct_ort/onnx/embed_tokens.onnx_data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df608b38b5f74245af866d89d946866029c818eeb5808fe05a9b0bfbcad0db77
+size 1244659712
onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1f5b88099db0f2cba2aa0d9edd50c96a028d4db1a21fbbce14c8d6f880ec211
+size 421329
onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a4d4587cf072871157ca82c5ca95248781ecc33077458eda73a51eb37418b0b
+size 2081295040
onnx/qwen2_5_vl_3b_instruct_ort/onnx/vision_encoder.onnx_data_1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8436636c8b42d164a9aacd57c2cdfbea01dc90ca7e3b15d8528414695db55a14
+size 593442112
onnx/qwen2_5_vl_3b_instruct_ort/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "min_pixels": 3136,
+  "max_pixels": 12845056,
+  "patch_size": 14,
+  "temporal_patch_size": 2,
+  "merge_size": 2,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "image_processor_type": "Qwen2VLImageProcessor",
+  "processor_class": "Qwen2_5_VLProcessor"
+}
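These numbers pin the visual token budget: `min_pixels` 3136 = 56 x 56 and `max_pixels` 12845056 = 3584 x 3584, so with `patch_size` 14 and `merge_size` 2 an image maps to between 4 and 16384 visual tokens after the 2x2 patch merge. A minimal sketch of the arithmetic (assumes the image is already resized to multiples of `patch_size * merge_size`):

```python
# Minimal sketch: visual tokens for a resized image under the config above.
def visual_tokens(height: int, width: int, patch: int = 14, merge: int = 2) -> int:
    # patches per side, then collapse each merge x merge group into one token
    return (height // patch) * (width // patch) // (merge * merge)

assert visual_tokens(56, 56) == 4          # min_pixels = 56 * 56
assert visual_tokens(3584, 3584) == 16384  # max_pixels = 3584 * 3584
```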
onnx/qwen2_5_vl_3b_instruct_ort/processor_config.json
ADDED
@@ -0,0 +1,63 @@
+{
+  "image_processor": {
+    "data_format": "channels_first",
+    "do_convert_rgb": true,
+    "do_normalize": true,
+    "do_rescale": true,
+    "do_resize": true,
+    "image_mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "image_processor_type": "Qwen2VLImageProcessorFast",
+    "image_std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ],
+    "merge_size": 2,
+    "patch_size": 14,
+    "resample": 3,
+    "rescale_factor": 0.00392156862745098,
+    "size": {
+      "longest_edge": 12845056,
+      "shortest_edge": 3136
+    },
+    "temporal_patch_size": 2
+  },
+  "processor_class": "Qwen2_5_VLProcessor",
+  "video_processor": {
+    "data_format": "channels_first",
+    "default_to_square": true,
+    "do_convert_rgb": true,
+    "do_normalize": true,
+    "do_rescale": true,
+    "do_resize": true,
+    "do_sample_frames": false,
+    "image_mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "image_processor_type": "Qwen2VLImageProcessor",
+    "image_std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ],
+    "max_frames": 768,
+    "merge_size": 2,
+    "min_frames": 4,
+    "patch_size": 14,
+    "resample": 3,
+    "rescale_factor": 0.00392156862745098,
+    "return_metadata": false,
+    "size": {
+      "longest_edge": 12845056,
+      "shortest_edge": 3136
+    },
+    "temporal_patch_size": 2,
+    "video_processor_type": "Qwen2VLVideoProcessor"
+  }
+}
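With the processor, preprocessor, and tokenizer files in place, the folder loads as a regular processor. A minimal sketch, assuming `transformers` with Qwen2.5-VL support and `Pillow` are installed (the image path is hypothetical):

```python
# Minimal sketch: load the processor from this folder and prepare inputs.
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("onnx/qwen2_5_vl_3b_instruct_ort")
image = Image.open("example.jpg")  # hypothetical local image
inputs = processor(
    text=["<|vision_start|><|image_pad|><|vision_end|>Describe this image."],
    images=[image],
    return_tensors="np",
)
print(inputs.keys())  # e.g. input_ids, attention_mask, pixel_values, image_grid_thw
```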
onnx/qwen2_5_vl_3b_instruct_ort/tokenizer.json
ADDED
The diff for this file is too large to render.
onnx/qwen2_5_vl_3b_instruct_ort/tokenizer_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "add_prefix_space": false,
+  "backend": "tokenizers",
+  "bos_token": null,
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {
+    "extra_token_0": "<|im_start|>",
+    "extra_token_1": "<|im_end|>",
+    "extra_token_2": "<|object_ref_start|>",
+    "extra_token_3": "<|object_ref_end|>",
+    "extra_token_4": "<|box_start|>",
+    "extra_token_5": "<|box_end|>",
+    "extra_token_6": "<|quad_start|>",
+    "extra_token_7": "<|quad_end|>",
+    "extra_token_8": "<|vision_start|>",
+    "extra_token_9": "<|vision_end|>",
+    "extra_token_10": "<|vision_pad|>",
+    "extra_token_11": "<|image_pad|>",
+    "extra_token_12": "<|video_pad|>"
+  },
+  "is_local": false,
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "processor_class": "Qwen2_5_VLProcessor",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null,
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+}
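The `chat_template` string duplicates `chat_template.jinja` from earlier in this commit; a minimal consistency check (hypothetical local paths):

```python
# Minimal sketch: confirm the two copies of the chat template stay in sync.
import json

with open("chat_template.jinja") as f:
    standalone = f.read()
with open("tokenizer_config.json") as f:
    embedded = json.load(f)["chat_template"]

assert standalone.strip() == embedded.strip()
```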