niobures committed on
Commit
f697d36
·
verified ·
1 Parent(s): b5827c5

Qwen2-VL-2B-Instruct-ONNX-Q4-F16

Browse files
Files changed (29) hide show
  1. .gitattributes +2 -0
  2. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/.gitattributes +38 -0
  3. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/.gitignore +2 -0
  4. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/EXPORT.md +57 -0
  5. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/Makefile +164 -0
  6. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/README.md +146 -0
  7. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/config.json +52 -0
  8. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/embeddings_bf16.bin +3 -0
  9. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/generation_config.json +15 -0
  10. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/infer.py +120 -0
  11. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/llm_config.json +28 -0
  12. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/merges.txt +0 -0
  13. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A.onnx +3 -0
  14. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A.onnx.data +3 -0
  15. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A_q4f16.onnx +3 -0
  16. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_B.onnx +3 -0
  17. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_B_q4f16.onnx +3 -0
  18. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_C.onnx +3 -0
  19. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_C_q4f16.onnx +3 -0
  20. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_D.onnx +3 -0
  21. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_D_q4f16.onnx +3 -0
  22. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E.onnx +3 -0
  23. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E.onnx.data +3 -0
  24. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E_q4f16.onnx +3 -0
  25. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/source.txt +1 -0
  26. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer.json +0 -0
  27. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer.txt +0 -0
  28. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer_config.json +129 -0
  29. models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/vocab.json +0 -0
.gitattributes CHANGED
@@ -47,3 +47,5 @@ models/Qwen2-VL-2B-Instruct/onnx/vision_encoder.onnx_data filter=lfs diff=lfs me
47
  models/Qwen2-VL-2B-Instruct/tokenizer.json filter=lfs diff=lfs merge=lfs -text
48
  models/Qwen2-VL-2B-Instruct/onnx/vision_encoder_bnb4.onnx_data filter=lfs diff=lfs merge=lfs -text
49
  models/Qwen2-VL-2B-Instruct/onnx/vision_encoder_q4.onnx_data filter=lfs diff=lfs merge=lfs -text
 
 
 
47
  models/Qwen2-VL-2B-Instruct/tokenizer.json filter=lfs diff=lfs merge=lfs -text
48
  models/Qwen2-VL-2B-Instruct/onnx/vision_encoder_bnb4.onnx_data filter=lfs diff=lfs merge=lfs -text
49
  models/Qwen2-VL-2B-Instruct/onnx/vision_encoder_q4.onnx_data filter=lfs diff=lfs merge=lfs -text
50
+ models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A.onnx.data filter=lfs diff=lfs merge=lfs -text
51
+ models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E.onnx.data filter=lfs diff=lfs merge=lfs -text
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/.gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.onnx.data filter=lfs diff=lfs merge=lfs -text
37
+ onnx/**/* filter=lfs diff=lfs merge=lfs -text
38
+ **/.git* filter= diff= merge= text
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .DS_STORE
2
+ /onnx-dest
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/EXPORT.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Export
2
+
3
+ The original model was exported using the following process:
4
+
5
+ The following repos were used:
6
+ * https://github.com/pdufour/Native-LLM-for-Android
7
+ * https://github.com/pdufour/transformers.js/tree/add-block-list
8
+
9
+ If you clone this repo and the above 2 repos into the same parent directory, you can run the following commands:
10
+
11
+ **From `Qwen2-VL-2B-Instruct-ONNX-Q4-F16`, run:**
12
+
13
+ `make all-in-one`
14
+
15
+ This will create an export of the onnx models.
16
+
17
+ **The following is a list of all commands available:**
18
+
19
+ **all-in-one**
20
+
21
+ Runs all steps (exporting, slimming, quantizing, cleaning, fixing GPU buffers) to produce fully prepared ONNX models.
22
+
23
+ **export**
24
+
25
+ Combines export-abcd and export-e to generate ONNX models for all parts.
26
+
27
+ **export-abcd**
28
+
29
+ Exports model parts A, B, C, and D by running QwenVL_Export_ABCD.py.
30
+
31
+ **export-e**
32
+
33
+ Exports model part E by running QwenVL_Export_E.py.
34
+
35
+ **slim**
36
+
37
+ Reduces ONNX model size by removing unnecessary elements for optimized deployment.
38
+
39
+ **quantize**
40
+
41
+ Quantizes all model parts (A, B, C, D, and E) to optimize size and performance.
42
+
43
+ **quantize-%**
44
+
45
+ Quantizes a specific model part (% can be A, B, C, D, or E) with targeted configurations.
46
+
47
+ **clean-large-files**
48
+
49
+ Deletes ONNX files larger than 2GB from the destination directory, keeping only models that can be loaded in ONNX runtime environments.
50
+
51
+ **fix-gpu-buffers**
52
+
53
+ Applies fixes to GPU buffers in ONNX files for part E to ensure GPU memory compatibility.
54
+
55
+ **all**
56
+
57
+ Alias for all-in-one to run the full ONNX model preparation pipeline.
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/Makefile ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ SHELL := /bin/bash
2
+
3
+ # Configuration variables
4
+ NATIVE_ANDROID = $(abspath ../Native-LLM-for-Android)
5
+ QWEN_VL_DIR = $(NATIVE_ANDROID)/Export_ONNX/QwenVL
6
+ ONNX_SRC_DIR = $(QWEN_VL_DIR)/onnx
7
+ ONNX_DEST_DIR = $(QWEN_VL_DIR)/onnx-dist
8
+ STAGING_DIR = /tmp/transformers.js/staging
9
+ TRANSFORMERS_JS_PATH = ../transformers.js
10
+ ONNX_TOOLS_PATH = $(NATIVE_ANDROID)/ONNX_Tools
11
+
12
+ # Python paths from venvs
13
+ NATIVE_PYTHON = $(NATIVE_ANDROID)/.venv/bin/python3
14
+ TRANSFORMERS_PYTHON = $(TRANSFORMERS_JS_PATH)/.venv/bin/python3
15
+
16
+ # Model parts
17
+ PARTS = A B C D E
18
+
19
+ define progress_bar
20
+ printf "\r Progress: \033[1;32m["; \
21
+ _done=$$(($1 * 20 / $2)); \
22
+ for ((i=0; i<_done; i++)); do printf "="; done; \
23
+ printf "\033[0m"; \
24
+ _left=$$((20 - _done)); \
25
+ for ((i=0; i<_left; i++)); do printf " "; done; \
26
+ printf "\033[1;32m]\033[0m $1/$2 Processing: \033[1;34m%s\033[K\033[0m\r" "$3"
27
+ endef
28
+
29
+ # See https://github.com/pytorch/pytorch/issues/94280#issuecomment-2089196400
30
+ # Original export scripts export a bunch of tensor files, so we merge into one / two files instead.
31
+ export-merged-source-models: export-merged-source-models-first-pass export-merged-source-models-second-pass
32
+ @echo "✅ Exporting merged source models complete"
33
+
34
+ export-merged-source-models-first-pass:
35
+ @echo "💾 First pass: Export all models with merged tensors..."
36
+ @mkdir -p $(ONNX_DEST_DIR)
37
+ @files=`find $(ONNX_SRC_DIR) -name "*.onnx"`; \
38
+ total=`echo "$$files" | wc -w | tr -d ' '`; \
39
+ echo "Files found (first pass): $$total"; \
40
+ current=0; \
41
+ for item in $$files; do \
42
+ current=$$((current + 1)); \
43
+ $(call progress_bar,$$current,$$total,$$item); \
44
+ $(NATIVE_PYTHON) -u -c "import onnx, os, sys; src='$$item'; dest_dir='$(ONNX_DEST_DIR)'; \
45
+ m = onnx.load(src); \
46
+ d = os.path.join(dest_dir, os.path.basename(src)); \
47
+ onnx.save_model(m, d, all_tensors_to_one_file=True, save_as_external_data=True, location=os.path.basename(d)+'.data')" || exit 1; \
48
+ done; \
49
+ echo "✅ Done first pass"
50
+
51
+ export-merged-source-models-second-pass:
52
+ @echo "💾 Second pass: Converting large models to external data format..."
53
+ @files=`find $(ONNX_DEST_DIR) -name "*.onnx"`; \
54
+ total=`echo "$$files" | wc -w | tr -d ' '`; \
55
+ echo "Files found (second pass): $$total"; \
56
+ current=0; \
57
+ for item in $$files; do \
58
+ current=$$((current + 1)); \
59
+ $(call progress_bar,$$current,$$total,$$item); \
60
+ $(NATIVE_PYTHON) -c 'import onnx, os, sys; \
61
+ src = """'"$$item"'"""; \
62
+ total_size = os.path.getsize(src); \
63
+ d = os.path.join(dest_dir, os.path.basename(src)); \
64
+ total_size += os.path.getsize(src + ".data") if os.path.exists(src + ".data") else 0; \
65
+ needs_external = total_size > 2e9; \
66
+ onnx.save_model( \
67
+ onnx.load(src), \
68
+ d, \
69
+ save_as_external_data=needs_external, \
70
+ all_tensors_to_one_file=True, \
71
+ location=(os.path.basename(src) + ".data") if needs_external else None \
72
+ ); \
73
+ not needs_external and os.path.exists(src + ".data") and os.remove(src + ".data") \
74
+ ' || exit 1; \
75
+ done; \
76
+ echo "✅ Done second models"
77
+
78
+
79
+ all-in-one: export quantize clean-large-files fix-gpu-buffers export-merged-source-models
80
+ @echo "✨ All done! ONNX models exported, slimmed, quantized and fixed"
81
+
82
+ export: export-abcd export-e
83
+ @echo "✅ Export complete"
84
+
85
+ export-abcd:
86
+ @echo "🚀 Exporting parts A, B, C, D..."
87
+ cd ../Native-LLM-for-Android/Export_ONNX/QwenVL && \
88
+ $(NATIVE_PYTHON) QwenVL_Export_ABCD.py "Qwen/Qwen2-VL-2B-Instruct"
89
+
90
+ export-e:
91
+ @echo "🚀 Exporting part E..."
92
+ cd ../Native-LLM-for-Android/Export_ONNX/QwenVL && \
93
+ $(NATIVE_PYTHON) QwenVL_Export_E.py "Qwen/Qwen2-VL-2B-Instruct"
94
+
95
+ slim:
96
+ @echo "🗜️ Slimming ONNX models..."
97
+ @files=`find $(ONNX_SRC_DIR) -name "*.onnx" -type f ! -name "QwenVL_E.onnx"`; \
98
+ total=`echo "$$files" | wc -w | tr -d ' '`; \
99
+ echo "Files found: $$total"; \
100
+ current=0; \
101
+ for item in $$files; do \
102
+ current=$$((current + 1)); \
103
+ $(call progress_bar,$$current,$$total,$$item); \
104
+ onnxslim --verbose "$$item" "$$item" || exit 1; \
105
+ done; \
106
+ echo "✅ Slimming complete"
107
+
108
+ quantize:
109
+ @echo "⚡ Starting quantization..."
110
+ for part in $(PARTS); do \
111
+ $(MAKE) quantize-$$part || exit 1; \
112
+ done
113
+ @echo "✅ Quantization complete"
114
+
115
+ quantize-%:
116
+ @echo "⚡ Quantizing part $*..."
117
+ mkdir -p $(ONNX_DEST_DIR)
118
+ cd $(TRANSFORMERS_JS_PATH) && \
119
+ mkdir -p $(STAGING_DIR) && \
120
+ rm -f $(STAGING_DIR)/* && \
121
+ ln -sf $$(realpath $(ONNX_SRC_DIR))/* $(STAGING_DIR)/ && \
122
+ find $(STAGING_DIR) -name "*_*_*.onnx_data" -delete && \
123
+ find $(STAGING_DIR) -name "*_*_*.onnx" -delete && \
124
+ find $(STAGING_DIR) -name "*.onnx" ! -name "QwenVL_$**.onnx" -delete && \
125
+ EXTRA_FLAGS=""; \
126
+ if [ "$*" = "A" ]; then EXTRA_FLAGS="--op_block_list Conv DynamicQuantizeLinear DequantizeLinear Resize"; fi; \
127
+ echo "Extra Flags for part $*: $$EXTRA_FLAGS" && \
128
+ PYTHONPATH=$(TRANSFORMERS_JS_PATH) .venv/bin/python3 -m scripts.quantize \
129
+ --input_folder '$(STAGING_DIR)' \
130
+ --output_folder '$(ONNX_DEST_DIR)' \
131
+ --mode q4f16 $$EXTRA_FLAGS
132
+
133
+ clean-large-files:
134
+ @echo "🧹 Removing ONNX files over 2GB..."
135
+ cd $(ONNX_DEST_DIR) && \
136
+ for f in $$(find . -name "*.onnx" -type f); do \
137
+ total_size=0; \
138
+ if [ -f "$$f"".data" ]; then \
139
+ total_size=$$(( $$(stat -f %z "$$f") + $$(stat -f %z "$$f"".data") )); \
140
+ elif [ -f "$$f""_data" ]; then \
141
+ total_size=$$(( $$(stat -f %z "$$f") + $$(stat -f %z "$$f""_data") )); \
142
+ else \
143
+ total_size=$$(stat -f %z "$$f"); \
144
+ fi; \
145
+ size_mb=$$(( total_size / 1048576 )); \
146
+ if [ $$total_size -ge 2147483648 ]; then \
147
+ echo " Removing $$f (size: $$size_mb MB)..."; \
148
+ rm -f "$$f" "$$f"".data" "$$f""_data"; \
149
+ fi \
150
+ done
151
+ @echo "✅ Large file cleanup complete"
152
+
153
+ fix-gpu-buffers:
154
+ @echo "🔧 Fixing GPU buffers for E models..."
155
+ @files=`find $(ONNX_DEST_DIR) -name "QwenVL_E_*.onnx" -type f`; \
156
+ total=`echo "$$files" | wc -w | tr -d ' '`; \
157
+ echo "Files found: $$total"; \
158
+ current=0; \
159
+ for item in $$files; do \
160
+ current=$$((current + 1)); \
161
+ $(call progress_bar,$$current,$$total,$$item); \
162
+ cd $(NATIVE_ANDROID) && .venv/bin/python3 ONNX_Tools/clamp_for_gpu_buffers.py --overwrite "$$item" || exit 1; \
163
+ done; \
164
+ echo "✅ GPU buffer fixes complete"
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/README.md ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model:
4
+ - Qwen/Qwen2-VL-2B-Instruct
5
+ ---
6
+ # Requirements
7
+ This is compatible with any onnx runtime.
8
+
9
+ # Running this model
10
+
11
+ **Javascript**
12
+
13
+ See https://huggingface.co/spaces/pdufour/Qwen2-VL-2B-Instruct-ONNX-Q4-F16 for a demo.
14
+
15
+
16
+ **Python**
17
+
18
+ Download the following script ./infer.py and then run like so:
19
+ python3 infer.py Qwen/Qwen2-VL-2B-Instruct 'path-to/Qwen2-VL-2B-Instruct-onnx/onnx'
20
+
21
+ ```
22
+ import os
23
+ import sys
24
+ import time
25
+ import torch
26
+ import numpy as np
27
+ import requests
28
+ import onnxruntime as ort
29
+ from PIL import Image
30
+ from io import BytesIO
31
+ from transformers import Qwen2VLConfig, AutoTokenizer
32
+
33
+ # Command line arguments
34
+ model_path = sys.argv[1]
35
+ onnx_path = sys.argv[2]
36
+
37
+ # Initialize model config and tokenizer
38
+ model_config = Qwen2VLConfig.from_pretrained(model_path)
39
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
40
+
41
+ # Model configuration
42
+ max_length = 1024
43
+ num_attention_heads = model_config.num_attention_heads
44
+ num_key_value_heads = model_config.num_key_value_heads
45
+ head_dim = model_config.hidden_size // num_attention_heads
46
+ num_layers = model_config.num_hidden_layers
47
+
48
+ # Setup ONNX sessions
49
+ session_options = ort.SessionOptions()
50
+ session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
51
+
52
+ # Model paths and sessions
53
+ models = ['A', 'B', 'C', 'D', 'E']
54
+ model_paths = {m: os.path.join(onnx_path, f'QwenVL_{m}_q4f16.onnx') for m in models}
55
+ sessions = {m: ort.InferenceSession(path, sess_options=session_options) for m, path in model_paths.items()}
56
+
57
+ # Input/output names
58
+ inputs = {
59
+ 'A': sessions['A'].get_inputs()[0].name,
60
+ 'B': [sessions['B'].get_inputs()[i].name for i in range(2)],
61
+ 'C': sessions['C'].get_inputs()[0].name,
62
+ 'D': [inp.name for inp in sessions['D'].get_inputs()],
63
+ 'E': [inp.name for inp in sessions['E'].get_inputs()]
64
+ }
65
+
66
+ outputs = {
67
+ 'A': sessions['A'].get_outputs()[0].name,
68
+ 'B': sessions['B'].get_outputs()[0].name,
69
+ 'C': sessions['C'].get_outputs()[0].name,
70
+ 'D': [out.name for out in sessions['D'].get_outputs()],
71
+ 'E': [out.name for out in sessions['E'].get_outputs()]
72
+ }
73
+
74
+ # Process image
75
+ image_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg'
76
+ image = Image.open(BytesIO(requests.get(image_url).content)).resize((960, 960)).convert('RGB')
77
+ image_array = np.expand_dims(np.transpose(np.array(image).astype(np.float32), (2, 0, 1)), axis=0) / 255.
78
+
79
+ # Prepare inputs
80
+ prompt = "Describe this image."
81
+ formatted_prompt = f"\n<|im_start|>user\n<|vision_start|><|vision_end|>{prompt}<|im_end|>\n<|im_start|>assistant\n"
82
+ input_ids = tokenizer(formatted_prompt, return_tensors='pt')['input_ids']
83
+ input_lengths = np.array([input_ids.shape[1]], dtype=np.int64)
84
+ tokens = np.zeros(max_length, dtype=np.int32)
85
+ tokens[:input_ids.shape[1]] = input_ids[0, :]
86
+ position = np.zeros(1, dtype=np.int64)
87
+
88
+ # Initialize caches
89
+ key_cache = np.zeros((num_layers, num_key_value_heads, max_length, head_dim), dtype=np.float16)
90
+ value_cache = key_cache.copy()
91
+
92
+ # Process initial inputs
93
+ hidden_states = sessions['B'].run(
94
+ [outputs['B']],
95
+ {inputs['B'][0]: tokens, inputs['B'][1]: input_lengths}
96
+ )[0]
97
+
98
+ batch_size = np.array(0, dtype=np.int32)
99
+ batch_size, = sessions['C'].run([outputs['C']], {inputs['C']: batch_size})
100
+
101
+ # Process image features
102
+ image_features = sessions['A'].run([outputs['A']], {inputs['A']: image_array})[0]
103
+ total_ids = 100 # 10 * 10 from original factors
104
+ input_lengths += total_ids
105
+ remaining_tokens = np.array(max_length - input_lengths[0] - total_ids, dtype=np.int32)
106
+ tokens_to_stop = np.array(input_lengths[0] - 5, dtype=np.int32)
107
+
108
+ hidden_states, batch_size = sessions['D'].run(
109
+ outputs['D'],
110
+ dict(zip(inputs['D'],
111
+ [hidden_states, image_features, input_lengths, tokens_to_stop, remaining_tokens]))
112
+ )
113
+
114
+ # Generate tokens
115
+ start_time = time.time()
116
+ for i in range(12): # MAX_ITERATIONS
117
+ token, key_cache, value_cache = sessions['E'].run(
118
+ outputs['E'],
119
+ dict(zip(inputs['E'],
120
+ [hidden_states, np.array([-65504. if i==0 else 0.], dtype=np.float16),
121
+ key_cache, value_cache, position, input_lengths, batch_size,
122
+ np.array([1-total_ids+10 if i==0 else position[0]+1], dtype=np.float16)]))
123
+ )
124
+
125
+ if token in [151643, 151645]: # End tokens
126
+ break
127
+
128
+ if i < 1:
129
+ position += input_lengths[0]
130
+ input_lengths[0] = 1
131
+ else:
132
+ position += 1
133
+
134
+ tokens[0] = token
135
+ hidden_states = sessions['B'].run(
136
+ [outputs['B']],
137
+ {inputs['B'][0]: tokens, inputs['B'][1]: input_lengths}
138
+ )[0]
139
+ print(tokenizer.decode(token), end='', flush=True)
140
+
141
+ print(f"\nTotal time: {time.time() - start_time:.2f}s")
142
+
143
+ ```
144
+
145
+ # Technical Information:
146
+ - [EXPORT.md](EXPORT.md)
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/config.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2VLForConditionalGeneration"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "vision_start_token_id": 151652,
9
+ "vision_end_token_id": 151653,
10
+ "vision_token_id": 151654,
11
+ "image_token_id": 151655,
12
+ "video_token_id": 151656,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 1536,
15
+ "initializer_range": 0.02,
16
+ "intermediate_size": 8960,
17
+ "max_position_embeddings": 32768,
18
+ "max_window_layers": 28,
19
+ "model_type": "qwen2_vl",
20
+ "num_attention_heads": 12,
21
+ "num_hidden_layers": 28,
22
+ "num_key_value_heads": 2,
23
+ "rms_norm_eps": 1e-06,
24
+ "rope_theta": 1000000.0,
25
+ "sliding_window": 32768,
26
+ "tie_word_embeddings": true,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.41.2",
29
+ "use_cache": true,
30
+ "use_sliding_window": false,
31
+ "vision_config": {
32
+ "depth": 32,
33
+ "embed_dim": 1280,
34
+ "mlp_ratio": 4,
35
+ "num_heads": 16,
36
+ "in_chans": 3,
37
+ "hidden_size": 1536,
38
+ "patch_size": 14,
39
+ "spatial_merge_size": 2,
40
+ "spatial_patch_size": 14,
41
+ "temporal_patch_size": 2
42
+ },
43
+ "rope_scaling": {
44
+ "type": "mrope",
45
+ "mrope_section": [
46
+ 16,
47
+ 24,
48
+ 24
49
+ ]
50
+ },
51
+ "vocab_size": 151936
52
+ }
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/embeddings_bf16.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5387220a9d57908c2c6fa69bcfa64fda4234e59103fa74f56d07eaa6f9af2493
3
+ size 466747392
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/generation_config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "pad_token_id": 151643,
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 151645,
7
+ 151643
8
+ ],
9
+ "repetition_penalty": 1.0,
10
+ "temperature": 0.01,
11
+ "top_p": 0.001,
12
+ "top_k": 1,
13
+ "transformers_version": "4.37.0"
14
+ }
15
+
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/infer.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import time
4
+ import torch
5
+ import numpy as np
6
+ import requests
7
+ import onnxruntime as ort
8
+ from PIL import Image
9
+ from io import BytesIO
10
+ from transformers import Qwen2VLConfig, AutoTokenizer
11
+
12
+ # Command line arguments
13
+ model_path = sys.argv[1]
14
+ onnx_path = sys.argv[2]
15
+
16
+ # Initialize model config and tokenizer
17
+ model_config = Qwen2VLConfig.from_pretrained(model_path)
18
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
19
+
20
+ # Model configuration
21
+ max_length = 1024
22
+ num_attention_heads = model_config.num_attention_heads
23
+ num_key_value_heads = model_config.num_key_value_heads
24
+ head_dim = model_config.hidden_size // num_attention_heads
25
+ num_layers = model_config.num_hidden_layers
26
+
27
+ # Setup ONNX sessions
28
+ session_options = ort.SessionOptions()
29
+ session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
30
+
31
+ # Model paths and sessions
32
+ models = ['A', 'B', 'C', 'D', 'E']
33
+ model_paths = {m: os.path.join(onnx_path, f'QwenVL_{m}_q4f16.onnx') for m in models}
34
+ sessions = {m: ort.InferenceSession(path, sess_options=session_options) for m, path in model_paths.items()}
35
+
36
+ # Input/output names
37
+ inputs = {
38
+ 'A': sessions['A'].get_inputs()[0].name,
39
+ 'B': [sessions['B'].get_inputs()[i].name for i in range(2)],
40
+ 'C': sessions['C'].get_inputs()[0].name,
41
+ 'D': [inp.name for inp in sessions['D'].get_inputs()],
42
+ 'E': [inp.name for inp in sessions['E'].get_inputs()]
43
+ }
44
+
45
+ outputs = {
46
+ 'A': sessions['A'].get_outputs()[0].name,
47
+ 'B': sessions['B'].get_outputs()[0].name,
48
+ 'C': sessions['C'].get_outputs()[0].name,
49
+ 'D': [out.name for out in sessions['D'].get_outputs()],
50
+ 'E': [out.name for out in sessions['E'].get_outputs()]
51
+ }
52
+
53
+ # Process image
54
+ image_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg'
55
+ image = Image.open(BytesIO(requests.get(image_url).content)).resize((960, 960)).convert('RGB')
56
+ image_array = np.expand_dims(np.transpose(np.array(image).astype(np.float32), (2, 0, 1)), axis=0) / 255.
57
+
58
+ # Prepare inputs
59
+ prompt = "Describe this image."
60
+ formatted_prompt = f"\n<|im_start|>user\n<|vision_start|><|vision_end|>{prompt}<|im_end|>\n<|im_start|>assistant\n"
61
+ input_ids = tokenizer(formatted_prompt, return_tensors='pt')['input_ids']
62
+ input_lengths = np.array([input_ids.shape[1]], dtype=np.int64)
63
+ tokens = np.zeros(max_length, dtype=np.int32)
64
+ tokens[:input_ids.shape[1]] = input_ids[0, :]
65
+ position = np.zeros(1, dtype=np.int64)
66
+
67
+ # Initialize caches
68
+ key_cache = np.zeros((num_layers, num_key_value_heads, max_length, head_dim), dtype=np.float16)
69
+ value_cache = key_cache.copy()
70
+
71
+ # Process initial inputs
72
+ hidden_states = sessions['B'].run(
73
+ [outputs['B']],
74
+ {inputs['B'][0]: tokens, inputs['B'][1]: input_lengths}
75
+ )[0]
76
+
77
+ batch_size = np.array(0, dtype=np.int32)
78
+ batch_size, = sessions['C'].run([outputs['C']], {inputs['C']: batch_size})
79
+
80
+ # Process image features
81
+ image_features = sessions['A'].run([outputs['A']], {inputs['A']: image_array})[0]
82
+ total_ids = 100 # 10 * 10 from original factors
83
+ input_lengths += total_ids
84
+ remaining_tokens = np.array(max_length - input_lengths[0] - total_ids, dtype=np.int32)
85
+ tokens_to_stop = np.array(input_lengths[0] - 5, dtype=np.int32)
86
+
87
+ hidden_states, batch_size = sessions['D'].run(
88
+ outputs['D'],
89
+ dict(zip(inputs['D'],
90
+ [hidden_states, image_features, input_lengths, tokens_to_stop, remaining_tokens]))
91
+ )
92
+
93
+ # Generate tokens
94
+ start_time = time.time()
95
+ for i in range(12): # MAX_ITERATIONS
96
+ token, key_cache, value_cache = sessions['E'].run(
97
+ outputs['E'],
98
+ dict(zip(inputs['E'],
99
+ [hidden_states, np.array([-65504. if i==0 else 0.], dtype=np.float16),
100
+ key_cache, value_cache, position, input_lengths, batch_size,
101
+ np.array([1-total_ids+10 if i==0 else position[0]+1], dtype=np.float16)]))
102
+ )
103
+
104
+ if token in [151643, 151645]: # End tokens
105
+ break
106
+
107
+ if i < 1:
108
+ position += input_lengths[0]
109
+ input_lengths[0] = 1
110
+ else:
111
+ position += 1
112
+
113
+ tokens[0] = token
114
+ hidden_states = sessions['B'].run(
115
+ [outputs['B']],
116
+ {inputs['B'][0]: tokens, inputs['B'][1]: input_lengths}
117
+ )[0]
118
+ print(tokenizer.decode(token), end='', flush=True)
119
+
120
+ print(f"\nTotal time: {time.time() - start_time:.2f}s")
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/llm_config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "hidden_size": 1536,
3
+ "layer_nums": 28,
4
+ "attention_mask": "float",
5
+ "key_value_shape": [
6
+ 2,
7
+ 1,
8
+ 0,
9
+ 2,
10
+ 128
11
+ ],
12
+ "prompt_template": "<|im_start|>user\n%s<|im_end|>\n<|im_start|>assistant\n",
13
+ "is_visual": true,
14
+ "image_mean": [
15
+ 122.7709383,
16
+ 116.7460125,
17
+ 104.09373615000001
18
+ ],
19
+ "image_norm": [
20
+ 0.01459842661924292,
21
+ 0.015007768493717056,
22
+ 0.014220065717024088
23
+ ],
24
+ "image_size": 420,
25
+ "vision_start": 151652,
26
+ "vision_end": 151653,
27
+ "image_pad": 151655
28
+ }
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9359181d8a217fd066b6201ca88d39ceef8d84464e886fa3af3634b767807967
3
+ size 22863481
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48c2e8d0ebb88762b324860ca74abd35d4848b08f84619e71acc5122a0e46c8f
3
+ size 5322170368
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_A_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdd6e4c85b5a835227106c01b31be8220eb4684026e726372a97c74cfdbcd983
3
+ size 1330987067
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_B.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b752394955396a0684cb491ebf802645ad6e73a29f4f2392c6bfd77759d7d86
3
+ size 234019162
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_B_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c5981ece4d144bc7f5352e56bb19d0d4b3bf22d1f8c472a106fcdcf83a9ebdf
3
+ size 233983290
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_C.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09090f067d75cbfb62f90fc1f783529ede85e07006da80681fbb6f535baa29d6
3
+ size 10335
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_C_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc4f5f601f5ac0b16632e4dc953ce7009f8c2bf0c5e5c1553b5250cda832a68a
3
+ size 6364
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_D.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4393146a8d328f1eae43e9058f391a1ef07048d6793747dab948838fcdfd1e6
3
+ size 26762
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_D_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5b46671d4d41a864d1390ef87eb5819e8c6fd044cded45e688301ae8eb7ab57
3
+ size 25118
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b602930000f109874f028142d62fc488908d65e30be235565efa310d3d32c89
3
+ size 1505816
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:691badb0cb9420b85d6483c5cf529af29cf8e864f8214f27b4412de3b5ab3097
3
+ size 12349714432
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/onnx/QwenVL_E_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:599c81da0035adf98d77db9b5776e7070017887394d06dd901c4d72125f6fd2b
3
+ size 996827324
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/pdufour/Qwen2-VL-2B-Instruct-ONNX-Q4-F16
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/tokenizer_config.json ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "151646": {
29
+ "content": "<|object_ref_start|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "151647": {
37
+ "content": "<|object_ref_end|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "151648": {
45
+ "content": "<|box_start|>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "151649": {
53
+ "content": "<|box_end|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "151650": {
61
+ "content": "<|quad_start|>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "151651": {
69
+ "content": "<|quad_end|>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "151652": {
77
+ "content": "<|vision_start|>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "151653": {
85
+ "content": "<|vision_end|>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "151654": {
93
+ "content": "<|vision_pad|>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "151655": {
101
+ "content": "<|image_pad|>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "151656": {
109
+ "content": "<|video_pad|>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ }
116
+ },
117
+ "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>","<|object_ref_end|>","<|box_start|>","<|box_end|>","<|quad_start|>","<|quad_end|>","<|vision_start|>","<|vision_end|>","<|vision_pad|>","<|image_pad|>","<|video_pad|>"],
118
+ "bos_token": null,
119
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
120
+ "clean_up_tokenization_spaces": false,
121
+ "eos_token": "<|im_end|>",
122
+ "padding_side": "left",
123
+ "errors": "replace",
124
+ "model_max_length": 32768,
125
+ "pad_token": "<|endoftext|>",
126
+ "split_special_tokens": false,
127
+ "tokenizer_class": "Qwen2Tokenizer",
128
+ "unk_token": null
129
+ }
models/Qwen2-VL-2B-Instruct-ONNX-Q4-F16/vocab.json ADDED
The diff for this file is too large to render. See raw diff