LittleMouse committed on
Commit · af04edb
Parent(s): 69ac7bb
Upload File
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +6 -0
- Qwen3-VL-2B-Instruct-ax8850/Qwen3-VL-2B-Instruct_vision.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/model.embed_tokens.weight.bfloat16.bin +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l0_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l10_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l11_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l12_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l13_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l14_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l15_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l16_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l17_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l18_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l19_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l1_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l20_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l21_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l22_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l23_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l24_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l25_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l26_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l27_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l2_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l3_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l4_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l5_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l6_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l7_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l8_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l9_together.axmodel +3 -0
- Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_post.axmodel +3 -0
- images/demo.jpg +3 -0
- images/demo1.jpg +3 -0
- images/recoAll_attractions_1.jpg +3 -0
- images/recoAll_attractions_2.jpg +3 -0
- images/recoAll_attractions_3.jpg +3 -0
- images/recoAll_attractions_4.jpg +3 -0
- images/ssd_car.jpg +3 -0
- images/ssd_horse.jpg +3 -0
- main_axcl_aarch64 +3 -0
- post_config.json +14 -0
- qwen3-vl-tokenizer/README.md +192 -0
- qwen3-vl-tokenizer/chat_template.json +4 -0
- qwen3-vl-tokenizer/config.json +63 -0
- qwen3-vl-tokenizer/configuration.json +1 -0
- qwen3-vl-tokenizer/generation_config.json +14 -0
- qwen3-vl-tokenizer/merges.txt +3 -0
- qwen3-vl-tokenizer/preprocessor_config.json +21 -0
- qwen3-vl-tokenizer/tokenizer.json +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.axmodel filter=lfs diff=lfs merge=lfs -text
+qwen3-vl-tokenizer/merges.txt filter=lfs diff=lfs merge=lfs -text
+qwen3-vl-tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+qwen3-vl-tokenizer/vocab.json filter=lfs diff=lfs merge=lfs -text
+main_axcl_aarch64 filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
Qwen3-VL-2B-Instruct-ax8850/Qwen3-VL-2B-Instruct_vision.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f8bd7c1864e378ebccd7c50b006586360dea7f0302db8662cfe93773603ae749
size 452430541
Qwen3-VL-2B-Instruct-ax8850/model.embed_tokens.weight.bfloat16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91c45bb2b9d8b678ceba1e3f0da785e47e5e6316384be544aa71b3c5d68ed733
size 622329856
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l0_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d35ad01c9630b23ae0715c3604ff01581debc033934f7e42feb38fd385cab4d9
size 73017579
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l10_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6fae923bae282c1dce739e360615684ecd5790aa8d221c52bec0a10b54b95745
size 73017227
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l11_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61a0661023037cafe2666be3d6e74e6656eccfd4e08a4eada20476a46cfc78ac
size 73013195
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l12_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec00c49eb7c6f282a3b8df8c5ed12060abd652e4a6fded9feb69c472354ab261
size 73018731
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l13_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c991fe51f02c8e1f0c3095c7e84888d0aa31d85cb148bf2696fadc31dfbc6cc0
size 73015723
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l14_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6606f5e112ce0116c5fb2a75e1e859b1262dc88520f9fcce7af9803e2f3b5ed5
size 73010379
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l15_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37c53e7d6d1601107b4fd220b5ec7c569754ee8a3ec55876157d06499c1781eb
size 73010379
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l16_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:371364a9461c0905e97f239b465b3b2f9cbb147b7ae95a356b47ebf74190e98e
size 73002091
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l17_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0d661a297436ecf2c20b537c562b54446abb7aeba206fe6ec1f9a7e05a1c57d
size 73009131
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l18_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24b0392b48879fb2cb35ff600c11f240b31e7b22126992cfa727047f35c7565c
size 73010827
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l19_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:78f7899e82de2c02d4a37e5ffe6b37b2fd9f573e4e0ba3aaeaa10dda590adf64
size 73008747
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l1_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2356413dbd20507f502c05b387d90700f92e960cc5c27a5a8cd56b7b6fa9da44
size 73015243
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l20_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d92b4ed9e9fa201f09cfd5af7d433d7001fceeb6da3f2332098c178a84e8781f
size 73007627
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l21_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6fdf7d91127a444f67391aac8c6990582612ca22eaabc4706a5743eb44d7d9a4
size 73010923
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l22_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6b386c94c28a85954ba42ad5b679f21b69450e18c5f48e2152c6c33de7e257a
size 73011915
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l23_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fa54ebaf7e6f3438a4665a2a106afe6940e379b18c5441c947f46565c80ebf3
size 73012939
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l24_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4691860fa6623e93e90d6cac9f0f1c35aa39cf444b7867401ba4e954d3f6ef77
size 73018251
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l25_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a4cbeea075f7e2666ec64452581769b21d245fa42ec917f891c4a74a5ea3694
size 73017227
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l26_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98e4ea7cab1e872fef4315def8b06d8e3dae4cbc5aee09a02c00331ec458bfc0
size 73012747
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l27_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:158747691d802f9b8873784fe238f54961068d5d337bad8c467720182056904b
size 73019211
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l2_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d5e3cfd99a40f324c5ab5dd0be0f94536edf72b69c83e32376e3130fbffc87af
size 73015755
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l3_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ab35c47469478e044c115d76a33b9e058ff29108aa74563f9d60ca0fa1bf31c
size 73016427
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l4_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12d6fe031ade93a0ceecf632c4dcde5411e2262a8632edbdcd89ae9aed278b9f
size 73014987
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l5_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:361da9989d0fbcfb2730992967565b09303379ddea90e84375e4cf9cdcd89841
size 73014603
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l6_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1aca7a41fdd3950bdb95198088b24e21aff86ac5ae6375db32b9bd7a2a3b1175
size 73010507
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l7_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:84a7b57f821e683970fbdd2346f57242c3ac87ef19fd2769823a62acedc3d222
size 73018955
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l8_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5de3147a63b1ae3974925664ecd5bf640a3155888de211e6ff915ffc2a8bce9a
size 73017835
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_p128_l9_together.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75ab25d28cf4bd87a7d6e5f3115432acf92a04144b267bb742bd3c3e9616b84b
size 73009259
Qwen3-VL-2B-Instruct-ax8850/qwen3_vl_text_post.axmodel
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40ff92e3c93b96b34d4d440d69cf5ae516f445f7dd957eec9775235c0944b053
size 339277488
images/demo.jpg
ADDED
Git LFS Details
images/demo1.jpg
ADDED
Git LFS Details
images/recoAll_attractions_1.jpg
ADDED
Git LFS Details
images/recoAll_attractions_2.jpg
ADDED
Git LFS Details
images/recoAll_attractions_3.jpg
ADDED
Git LFS Details
images/recoAll_attractions_4.jpg
ADDED
Git LFS Details
images/ssd_car.jpg
ADDED
Git LFS Details
images/ssd_horse.jpg
ADDED
Git LFS Details
main_axcl_aarch64
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c43130c85d23321f85f3b62419c8a542fe6c8a63b03edada8fa6ccd050aaa79
size 7622944
post_config.json
ADDED
@@ -0,0 +1,14 @@
{
    "enable_temperature" : true,
    "temperature" : 0.2,

    "enable_repetition_penalty" : false,
    "repetition_penalty" : 1,
    "penalty_window" : 30,

    "enable_top_p_sampling" : false,
    "top_p" : 0.8,

    "enable_top_k_sampling" : true,
    "top_k" : 10
}
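post_config.json holds the runtime sampling settings for the on-device runner: temperature scaling at 0.2 and top-k sampling with k=10 are enabled, while top-p sampling and the repetition penalty are switched off. Below is a minimal sketch of how these fields could be applied to a logits vector; it assumes NumPy, a dummy logits array sized to the 151936-entry vocabulary from config.json, and that the script is run next to post_config.json. It is illustrative only, not the code inside main_axcl_aarch64.

```python
import json
import numpy as np

def sample_next_token(logits: np.ndarray, cfg: dict) -> int:
    """Illustrative sampler mirroring the fields in post_config.json."""
    logits = logits.astype(np.float64)
    if cfg.get("enable_temperature", False):
        logits = logits / cfg["temperature"]            # temperature = 0.2
    if cfg.get("enable_top_k_sampling", False):
        kth_largest = np.sort(logits)[-cfg["top_k"]]    # top_k = 10
        logits = np.where(logits < kth_largest, -np.inf, logits)
    # enable_top_p_sampling and enable_repetition_penalty are false in this config, so skipped.
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    return int(np.random.choice(len(probs), p=probs))

with open("post_config.json") as f:
    cfg = json.load(f)

dummy_logits = np.random.randn(151936)  # vocab_size taken from qwen3-vl-tokenizer/config.json
print(sample_next_token(dummy_logits, cfg))
```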
qwen3-vl-tokenizer/README.md
ADDED
@@ -0,0 +1,192 @@
---
license: apache-2.0
pipeline_tag: image-text-to-text
library_name: transformers
---
<a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;">
    <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/>
</a>

# Qwen3-VL-2B-Instruct

Meet Qwen3-VL — the most powerful vision-language model in the Qwen series to date.

This generation delivers comprehensive upgrades across the board: superior text understanding & generation, deeper visual perception & reasoning, extended context length, enhanced spatial and video dynamics comprehension, and stronger agent interaction capabilities.

Available in Dense and MoE architectures that scale from edge to cloud, with Instruct and reasoning-enhanced Thinking editions for flexible, on-demand deployment.

#### Key Enhancements:

* **Visual Agent**: Operates PC/mobile GUIs—recognizes elements, understands functions, invokes tools, completes tasks.

* **Visual Coding Boost**: Generates Draw.io/HTML/CSS/JS from images/videos.

* **Advanced Spatial Perception**: Judges object positions, viewpoints, and occlusions; provides stronger 2D grounding and enables 3D grounding for spatial reasoning and embodied AI.

* **Long Context & Video Understanding**: Native 256K context, expandable to 1M; handles books and hours-long video with full recall and second-level indexing.

* **Enhanced Multimodal Reasoning**: Excels in STEM/Math—causal analysis and logical, evidence-based answers.

* **Upgraded Visual Recognition**: Broader, higher-quality pretraining lets the model “recognize everything”—celebrities, anime, products, landmarks, flora/fauna, etc.

* **Expanded OCR**: Supports 32 languages (up from 19); robust in low light, blur, and tilt; better with rare/ancient characters and jargon; improved long-document structure parsing.

* **Text Understanding on par with pure LLMs**: Seamless text–vision fusion for lossless, unified comprehension.

#### Model Architecture Updates:

<p align="center">
    <img src="https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3-VL/qwen3vl_arc.jpg" width="80%"/>
</p>

1. **Interleaved-MRoPE**: Full-frequency allocation over time, width, and height via robust positional embeddings, enhancing long-horizon video reasoning.

2. **DeepStack**: Fuses multi-level ViT features to capture fine-grained details and sharpen image–text alignment.

3. **Text–Timestamp Alignment**: Moves beyond T-RoPE to precise, timestamp-grounded event localization for stronger video temporal modeling.

This is the weight repository for Qwen3-VL-2B-Instruct.

---

## Model Performance

**Multimodal performance**



**Pure text performance**


## Quickstart

Below, we provide simple examples showing how to use Qwen3-VL with 🤖 ModelScope and 🤗 Transformers.

The code for Qwen3-VL is in the latest Hugging Face `transformers`; we advise you to build from source with the following command:
```
pip install git+https://github.com/huggingface/transformers
# pip install transformers==4.57.0 # currently, V4.57.0 is not released
```

### Using 🤗 Transformers to Chat

Here is a code snippet showing how to chat with the model using `transformers`:

```python
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor

# default: Load the model on the available device(s)
model = Qwen3VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen3-VL-2B-Instruct", dtype="auto", device_map="auto"
)

# We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
# model = Qwen3VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen3-VL-2B-Instruct",
#     dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="auto",
# )

processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Preparation for inference
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt"
)
inputs = inputs.to(model.device)

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
```

### Generation Hyperparameters
#### VL
```bash
export greedy='false'
export top_p=0.8
export top_k=20
export temperature=0.7
export repetition_penalty=1.0
export presence_penalty=1.5
export out_seq_length=16384
```

#### Text
```bash
export greedy='false'
export top_p=1.0
export top_k=40
export repetition_penalty=1.0
export presence_penalty=2.0
export temperature=1.0
export out_seq_length=32768
```

## Citation

If you find our work helpful, feel free to cite us.

```
@misc{qwen3technicalreport,
      title={Qwen3 Technical Report},
      author={Qwen Team},
      year={2025},
      eprint={2505.09388},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2505.09388},
}

@article{Qwen2.5-VL,
  title={Qwen2.5-VL Technical Report},
  author={Bai, Shuai and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Song, Sibo and Dang, Kai and Wang, Peng and Wang, Shijie and Tang, Jun and Zhong, Humen and Zhu, Yuanzhi and Yang, Mingkun and Li, Zhaohai and Wan, Jianqiang and Wang, Pengfei and Ding, Wei and Fu, Zheren and Xu, Yiheng and Ye, Jiabo and Zhang, Xi and Xie, Tianbao and Cheng, Zesen and Zhang, Hang and Yang, Zhibo and Xu, Haiyang and Lin, Junyang},
  journal={arXiv preprint arXiv:2502.13923},
  year={2025}
}

@article{Qwen2VL,
  title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
  author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
  journal={arXiv preprint arXiv:2409.12191},
  year={2024}
}

@article{Qwen-VL,
  title={Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond},
  author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
  journal={arXiv preprint arXiv:2308.12966},
  year={2023}
}
```
qwen3-vl-tokenizer/chat_template.json
ADDED
@@ -0,0 +1,4 @@
{
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {%- if messages[0].content is string %}\n {{- messages[0].content }}\n {%- else %}\n {%- for content in messages[0].content %}\n {%- if 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].content is string %}\n {{- messages[0].content }}\n {%- else %}\n {%- for content in messages[0].content %}\n {%- if 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set image_count = namespace(value=0) %}\n{%- set video_count = namespace(value=0) %}\n{%- for message in messages %}\n {%- if message.role == \"user\" %}\n {{- '<|im_start|>' + message.role + '\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content in message.content %}\n {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif content.type == 'video' or 'video' in content %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role + '\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content_item in message.content %}\n {%- if 'text' in content_item %}\n {{- content_item.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and message.content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {%- if message.content is string %}\n {{- message.content }}\n {%- else %}\n {%- for content in message.content %}\n {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}\n {%- set image_count.value = image_count.value + 1 %}\n {%- if 
add_vision_id %}Picture {{ image_count.value }}: {% endif -%}\n <|vision_start|><|image_pad|><|vision_end|>\n {%- elif content.type == 'video' or 'video' in content %}\n {%- set video_count.value = video_count.value + 1 %}\n {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}\n <|vision_start|><|video_pad|><|vision_end|>\n {%- elif 'text' in content %}\n {{- content.text }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"
}
qwen3-vl-tokenizer/config.json
ADDED
@@ -0,0 +1,63 @@
{
  "architectures": [
    "Qwen3VLForConditionalGeneration"
  ],
  "image_token_id": 151655,
  "model_type": "qwen3_vl",
  "text_config": {
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "dtype": "bfloat16",
    "eos_token_id": 151645,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "initializer_range": 0.02,
    "intermediate_size": 6144,
    "max_position_embeddings": 262144,
    "model_type": "qwen3_vl_text",
    "num_attention_heads": 16,
    "num_hidden_layers": 28,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "mrope_interleaved": true,
      "mrope_section": [
        24,
        20,
        20
      ],
      "rope_type": "default"
    },
    "rope_theta": 5000000,
    "tie_word_embeddings": true,
    "use_cache": true,
    "vocab_size": 151936
  },
  "tie_word_embeddings": true,
  "transformers_version": "4.57.0.dev0",
  "video_token_id": 151656,
  "vision_config": {
    "deepstack_visual_indexes": [
      5,
      11,
      17
    ],
    "depth": 24,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1024,
    "in_channels": 3,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "model_type": "qwen3_vl",
    "num_heads": 16,
    "num_position_embeddings": 2304,
    "out_hidden_size": 2048,
    "patch_size": 16,
    "spatial_merge_size": 2,
    "temporal_patch_size": 2
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652
}
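One practical reading of the text_config above: with 28 layers, 8 key/value heads, and a head dimension of 128 stored in bfloat16, the KV cache costs roughly 112 KiB per token. A quick back-of-the-envelope check (my own arithmetic for illustration, not anything shipped in this repo):

```python
# KV-cache estimate from the text_config values above (illustrative only).
num_hidden_layers = 28
num_key_value_heads = 8
head_dim = 128
bytes_per_value = 2  # bfloat16

# Factor of 2 covers both the K and the V cache per layer.
per_token = num_hidden_layers * 2 * num_key_value_heads * head_dim * bytes_per_value
print(per_token)                 # 114688 bytes ~= 112 KiB per token
print(per_token * 2048 / 2**20)  # ~224 MiB of cache for a 2048-token context
```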
qwen3-vl-tokenizer/configuration.json
ADDED
@@ -0,0 +1 @@
{"framework":"Pytorch","task":"image-text-to-text"}
qwen3-vl-tokenizer/generation_config.json
ADDED
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "pad_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "top_p": 0.8,
  "top_k": 20,
  "temperature": 0.7,
  "repetition_penalty": 1.0,
  "transformers_version": "4.56.0"
}
qwen3-vl-tokenizer/merges.txt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:599bab54075088774b1733fde865d5bd747cbcc7a547c5bc12610e874e26f5e3
size 1671839
qwen3-vl-tokenizer/preprocessor_config.json
ADDED
@@ -0,0 +1,21 @@
{
  "size": {
    "longest_edge": 16777216,
    "shortest_edge": 65536
  },
  "patch_size": 16,
  "temporal_patch_size": 2,
  "merge_size": 2,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "processor_class": "Qwen3VLProcessor",
  "image_processor_type": "Qwen2VLImageProcessorFast"
}
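With patch_size 16 and merge_size 2 above, each 32×32 pixel block of a (resized) image ends up as one vision token fed to the language model. Below is a rough sketch of the resulting token count; it is an approximation for illustration only, since the actual processor also snaps image sides to multiples of patch_size × merge_size and enforces the size limits listed above.

```python
import math

# From preprocessor_config.json: 16x16 pixel patches, merged 2x2 before the language model.
patch_size = 16
merge_size = 2

def approx_vision_tokens(height: int, width: int) -> int:
    """Rough vision-token count for an image of the given size."""
    grid_h = math.ceil(height / patch_size)
    grid_w = math.ceil(width / patch_size)
    return (grid_h * grid_w) // (merge_size ** 2)

print(approx_vision_tokens(448, 448))  # 28 * 28 patches -> 196 tokens
```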
qwen3-vl-tokenizer/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5d85b6dcc535e6b93115a9ef287e6132fdbf30270da6218194ba742261173c7
size 7032403