yongqiang committed
Commit 8a9aaa0 · 0 Parent(s)

Initialize the repository

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +44 -0
  2. .gitignore +14 -0
  3. README.md +147 -0
  4. assets/demo_1.png +3 -0
  5. assets/demo_2.png +3 -0
  6. config.json +3 -0
  7. examples/image_0.jpg +3 -0
  8. examples/image_1.jpg +3 -0
  9. examples/image_2.png +3 -0
  10. examples/image_3.png +3 -0
  11. examples/laorenshuaidao.mp4 +3 -0
  12. examples/red-panda.mp4 +3 -0
  13. examples/tuboshu.mp4 +3 -0
  14. gradio_demo.py +389 -0
  15. infer_axmodel.py +183 -0
  16. infer_torch.py +212 -0
  17. internvl3-5_axmodel/model.embed_tokens.weight.bfloat16.bin +3 -0
  18. internvl3-5_axmodel/model.embed_tokens.weight.float32.bin +3 -0
  19. internvl3-5_axmodel/model.embed_tokens.weight.npy +3 -0
  20. internvl3-5_axmodel/qwen3_p128_l0_together.axmodel +3 -0
  21. internvl3-5_axmodel/qwen3_p128_l10_together.axmodel +3 -0
  22. internvl3-5_axmodel/qwen3_p128_l11_together.axmodel +3 -0
  23. internvl3-5_axmodel/qwen3_p128_l12_together.axmodel +3 -0
  24. internvl3-5_axmodel/qwen3_p128_l13_together.axmodel +3 -0
  25. internvl3-5_axmodel/qwen3_p128_l14_together.axmodel +3 -0
  26. internvl3-5_axmodel/qwen3_p128_l15_together.axmodel +3 -0
  27. internvl3-5_axmodel/qwen3_p128_l16_together.axmodel +3 -0
  28. internvl3-5_axmodel/qwen3_p128_l17_together.axmodel +3 -0
  29. internvl3-5_axmodel/qwen3_p128_l18_together.axmodel +3 -0
  30. internvl3-5_axmodel/qwen3_p128_l19_together.axmodel +3 -0
  31. internvl3-5_axmodel/qwen3_p128_l1_together.axmodel +3 -0
  32. internvl3-5_axmodel/qwen3_p128_l20_together.axmodel +3 -0
  33. internvl3-5_axmodel/qwen3_p128_l21_together.axmodel +3 -0
  34. internvl3-5_axmodel/qwen3_p128_l22_together.axmodel +3 -0
  35. internvl3-5_axmodel/qwen3_p128_l23_together.axmodel +3 -0
  36. internvl3-5_axmodel/qwen3_p128_l24_together.axmodel +3 -0
  37. internvl3-5_axmodel/qwen3_p128_l25_together.axmodel +3 -0
  38. internvl3-5_axmodel/qwen3_p128_l26_together.axmodel +3 -0
  39. internvl3-5_axmodel/qwen3_p128_l27_together.axmodel +3 -0
  40. internvl3-5_axmodel/qwen3_p128_l2_together.axmodel +3 -0
  41. internvl3-5_axmodel/qwen3_p128_l3_together.axmodel +3 -0
  42. internvl3-5_axmodel/qwen3_p128_l4_together.axmodel +3 -0
  43. internvl3-5_axmodel/qwen3_p128_l5_together.axmodel +3 -0
  44. internvl3-5_axmodel/qwen3_p128_l6_together.axmodel +3 -0
  45. internvl3-5_axmodel/qwen3_p128_l7_together.axmodel +3 -0
  46. internvl3-5_axmodel/qwen3_p128_l8_together.axmodel +3 -0
  47. internvl3-5_axmodel/qwen3_p128_l9_together.axmodel +3 -0
  48. internvl3-5_axmodel/qwen3_post.axmodel +3 -0
  49. internvl3-5_tokenizer/.gitattributes +36 -0
  50. internvl3-5_tokenizer/README.md +830 -0
.gitattributes ADDED
@@ -0,0 +1,44 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.axmodel filter=lfs diff=lfs merge=lfs -text
+ main_api_ax650 filter=lfs diff=lfs merge=lfs -text
+ main_api_axcl_x86 filter=lfs diff=lfs merge=lfs -text
+ main_ax650 filter=lfs diff=lfs merge=lfs -text
+ main_axcl_x86 filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,14 @@
+ __pycache__
+ *.tar
+ build-output/
+ *.onnx
+ *.pth
+ *.pkl
+ *.bin
+ *.npy
+ *.axmodel
+ *_axmodel/
+ *.safetensors
+ compiled*
+ *tmp/
+
README.md ADDED
@@ -0,0 +1,147 @@
+ ---
+ library_name: transformers
+ license: bsd-3-clause
+ base_model:
+ - OpenGVLab/InternVL3_5-1B
+ tags:
+ - InternVL3
+ - InternVL3_5-1B
+ - Int8
+ - VLM
+ pipeline_tag: image-text-to-text
+ language:
+ - en
+ ---
+
+ # InternVL3_5-1B
+
+ This version of InternVL3_5-1B has been converted to run on the Axera NPU using **w8a16** quantization.
+
+ It is compatible with Pulsar2 version 4.1.
+
+ ## Conversion tool links
+
+ If you are interested in model conversion, you can export the axmodel yourself starting from the original repository:
+
+ https://huggingface.co/OpenGVLab/InternVL3_5-1B
+
+ [How to Convert LLM from Huggingface to axmodel](https://github.com/AXERA-TECH/InternVL3_5-1B.axera/tree/main/model_convert)
+
+ [AXera NPU HOST LLM Runtime](https://github.com/AXERA-TECH/ax-llm/tree/ax-internvl)
+
+ [AXera NPU AXCL LLM Runtime](https://github.com/AXERA-TECH/ax-llm/tree/axcl-internvl)
+
+ ## Supported Platforms
+
+ - AX650
+ - AX650N DEMO Board
+ - [M4N-Dock(爱芯派Pro)](https://wiki.sipeed.com/hardware/zh/maixIV/m4ndock/m4ndock.html)
+ - [M.2 Accelerator card](https://axcl-docs.readthedocs.io/zh-cn/latest/doc_guide_hardware.html)
+
+ | Chip | Image encoder (448×448) | TTFT | w8a16 decode |
+ |--|--|--|--|
+ | AX650 | 364.412 ms | 5072 ms | 21.60 tokens/sec |
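+
+ As a rough rule of thumb, end-to-end generation time is about `TTFT + new_tokens / throughput`. The sketch below is not part of the repository; it simply plugs in the numbers from the table above:
+
+ ```python
+ # Rough latency estimate from the AX650 w8a16 numbers in the table above.
+ TTFT_MS = 5072.0             # time to first token, in milliseconds
+ DECODE_TOKENS_PER_S = 21.60  # decode throughput, tokens per second
+
+ def estimated_latency_ms(new_tokens: int) -> float:
+     """Approximate wall-clock time to produce `new_tokens` tokens."""
+     return TTFT_MS + new_tokens / DECODE_TOKENS_PER_S * 1000.0
+
+ print(f"~{estimated_latency_ms(128):.0f} ms to generate 128 tokens")  # roughly 11 seconds
+ ```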
+
+ ## How to use
+
+ Download all files from this repository to the device:
+
+ ```
+ $ tree -L 1
+ .
+ ├── assets
+ ├── config.json
+ ├── examples
+ ├── gradio_demo.py
+ ├── infer_axmodel.py
+ ├── infer_torch.py
+ ├── internvl3-5_axmodel
+ ├── internvl3-5_tokenizer
+ ├── README.md
+ ├── utils
+ └── vit-models
+
+ 6 directories, 5 files
+ ```
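+
+ If you prefer to fetch everything programmatically rather than copying files by hand, `huggingface_hub` can mirror the repository, LFS files included. This is a minimal sketch; `AXERA-TECH/InternVL3_5-1B` and the target directory are placeholders, so substitute the actual id of this repository:
+
+ ```python
+ # Sketch: download the whole repository (including LFS-tracked *.axmodel files).
+ from huggingface_hub import snapshot_download
+
+ local_dir = snapshot_download(
+     repo_id="AXERA-TECH/InternVL3_5-1B",   # placeholder repo id
+     local_dir="./InternVL3_5-1B.axera",    # placeholder download location
+ )
+ print("Repository downloaded to", local_dir)
+ ```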
+
+ #### Install transformers
+
+ ```
+ pip install transformers==4.57.1
+ ```
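+
+ A quick way to confirm that `transformers` and the bundled tokenizer work together is to load the tokenizer from `internvl3-5_tokenizer/`, exactly as `infer_axmodel.py` and `gradio_demo.py` do. A minimal sketch, run from the repository root:
+
+ ```python
+ # Sketch: sanity-check the tokenizer shipped in this repository.
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("internvl3-5_tokenizer/")
+ ids = tokenizer.encode("你好, InternVL3_5-1B!")
+ print(len(ids), "tokens:", ids)
+ print(tokenizer.decode(ids))
+ ```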
+
+ #### Inference on an AX650 host, such as M4N-Dock(爱芯派Pro) or the AX650 DEMO Board
+
+ Start an interactive conversation through the `Gradio` web UI:
+
+ ```bash
+ $ python3 gradio_demo.py --hf_model internvl3-5_tokenizer/ --axmodel_path internvl3-5_axmodel/ --vit_model vit-models/internvl_vit_model_1x3x448x448.axmodel
+ ```
+
+ Plain text dialogue:
+
+ ![demo_1](assets/demo_1.png)
+
+ Image understanding:
+
+ ![demo_2](assets/demo_2.png)
+
+ ---
+
+ Run the following command on the Axera board to start a chat conversation:
+
+ ```sh
+ $ cd InternVL3_5-1B.axera/python
+ $ python3 infer_axmodel.py --hf_model internvl3-5_tokenizer/ --axmodel_path internvl3-5_axmodel/ --question "请计算函数[y=2x^2+2]的导数, 并提供 markdown 格式的推理过程"
+ ```
+
+ Output:
+
+ ```bash
+ [INFO] Using provider: AxEngineExecutionProvider
+ [INFO] Model type: 2 (triple core)
+ [INFO] Compiler version: 5.1-dirty 0fdbfe15-dirty
+ Model loaded successfully!
+ slice_indices: [0]
+ Slice prefill done: 0
+ answer >> 函数 \( y = 2x^2 + 2 \) 的导数可以通过求导法则来计算。首先,我们对函数中的每一项分别求导:
+
+ 1. 对于 \( 2x^2 \),使用幂法则求导:
+ \[
+ \frac{d}{dx}(2x^2) = 2 \cdot 2x = 4x
+ \]
+
+ 2. 对于常数项 \( 2 \),其导数为 0,因为常数的导数为 0。
+
+ 将这两部分的结果相加,得到函数 \( y \) 的导数:
+ \[
+ y' = 4x
+ \]
+
+ 因此,函数 \( y = 2x^2 + 2 \) 的导数为 \( y' = 4x \)。
+ ```
+
+ Run the following command to perform the single-image understanding task:
+
+ ```sh
+ $ cd InternVL3_5-1B.axera/python
+ $ python3 infer_axmodel.py --hf_model internvl3-5_tokenizer/ --axmodel_path internvl3-5_axmodel/ --question "请描述这幅图" -i examples/image_0.jpg --vit_model vit-models/internvl_vit_model_1x3x448x448.axmodel
+ ```
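+
+ Before an image reaches the ViT axmodel, `infer_axmodel.py` resizes it to the encoder's fixed 448×448 input and normalizes it with ImageNet statistics. The following condensed sketch mirrors the `build_transform`/`load_image` helpers in this repository (with `max_num=1`, i.e. a single tile per image):
+
+ ```python
+ # Sketch: the 448x448 preprocessing applied before the ViT axmodel (see infer_axmodel.py).
+ import torch
+ import torchvision.transforms as T
+ from PIL import Image
+ from torchvision.transforms.functional import InterpolationMode
+
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+
+ def preprocess(path: str, input_size: int = 448) -> torch.Tensor:
+     transform = T.Compose([
+         T.Lambda(lambda im: im.convert("RGB")),
+         T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+         T.ToTensor(),
+         T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
+     ])
+     return transform(Image.open(path)).unsqueeze(0)  # shape: 1 x 3 x 448 x 448
+
+ pixel_values = preprocess("examples/image_0.jpg")
+ print(pixel_values.shape)  # torch.Size([1, 3, 448, 448])
+ ```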
+
+ ![image_0.jpg](examples/image_0.jpg)
+
+ Output:
+
+ ```bash
+ [INFO] Model type: 2 (triple core)
+ [INFO] Compiler version: 5.1-dirty 0fdbfe15-dirty
+ Model loaded successfully!
+ slice_indices: [0, 1, 2]
+ Slice prefill done: 0
+ Slice prefill done: 1
+ Slice prefill done: 2
+ answer >> 这是一张红熊猫的照片。红熊猫是一种红棕色的哺乳动物,通常生活在亚洲的森林中。它们以捕食昆虫和小型无脊椎动物为生。图片中,红熊猫正坐在一个木制的平台上,背景是绿色的树木和植被,显得非常自然和生动。红熊猫的表情看起来很友好,似乎在观察或等待什么。
+ ```
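+
+ Note how `slice_indices` differs between the two runs above: the runtime prefills the prompt in fixed 128-token chunks (`slice_len = 128` in `infer_axmodel.py`), and every `<img>` block inserts 256 `<IMG_CONTEXT>` tokens, so the image prompt spans three prefill slices while the plain-text prompt fits in one. The sketch below is illustrative only (the token counts are made up), not code from the repository:
+
+ ```python
+ # Sketch: how many 128-token prefill slices a prompt needs (cf. slice_indices in the logs).
+ import math
+
+ SLICE_LEN = 128            # prefill chunk size used by infer_axmodel.py
+ IMG_CONTEXT_TOKENS = 256   # <IMG_CONTEXT> tokens inserted per image
+
+ def num_prefill_slices(text_tokens: int, num_images: int = 0) -> int:
+     total = text_tokens + num_images * IMG_CONTEXT_TOKENS
+     return math.ceil(total / SLICE_LEN)
+
+ print(num_prefill_slices(text_tokens=80))                 # 1 -> slice_indices: [0]
+ print(num_prefill_slices(text_tokens=80, num_images=1))   # 3 -> slice_indices: [0, 1, 2]
+ ```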
assets/demo_1.png ADDED

Git LFS Details

  • SHA256: 512b585d3edf0f2a90ecdec6d98508d4a551057c2f125f5f1ac61d75164f637c
  • Pointer size: 131 Bytes
  • Size of remote file: 488 kB
assets/demo_2.png ADDED

Git LFS Details

  • SHA256: 30ff98a4537cc4329da2646e577469e0bc1ca171b7aa0e0c0aa96539a027e217
  • Pointer size: 131 Bytes
  • Size of remote file: 979 kB
config.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e960e7c7b8924496695fb1e1676f969d6243afd404e21c16e4287d39f0756da5
3
+ size 3874
examples/image_0.jpg ADDED

Git LFS Details

  • SHA256: c587294b3bf637dacbb3c96324c127187a2f242c94f639633a0d8a2775a9a399
  • Pointer size: 130 Bytes
  • Size of remote file: 78.1 kB
examples/image_1.jpg ADDED

Git LFS Details

  • SHA256: 08487494b8dc08d44bc36491adf3ab89ff30d13a3122da86f3cd67cad89eeee8
  • Pointer size: 131 Bytes
  • Size of remote file: 126 kB
examples/image_2.png ADDED

Git LFS Details

  • SHA256: 622ae2d01ff4467fa69a7888728d776650117a0f4887e96ba0fb9a8a6d77b3c3
  • Pointer size: 131 Bytes
  • Size of remote file: 355 kB
examples/image_3.png ADDED

Git LFS Details

  • SHA256: 729e80e77d8611778859d2f232cb7f2a8fda04ed67dd8dcc3e7cd7a657367402
  • Pointer size: 131 Bytes
  • Size of remote file: 394 kB
examples/laorenshuaidao.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f5c00b37b23af3d01d133da880eb7f6e50d4af608e3575784be7063eb137011
3
+ size 2704112
examples/red-panda.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d921c07bb97224d65a37801541d246067f0d506f08723ffa1ad85c217907ccb8
3
+ size 1867237
examples/tuboshu.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced4d95877b9a7f8b48f79bdfe4287eff8837f20348daec2f2e2987459ec1712
3
+ size 5952043
gradio_demo.py ADDED
@@ -0,0 +1,389 @@
1
+ import argparse
2
+ import os
3
+ import time
4
+ from typing import Any, Dict, List, Optional, Generator, Tuple
5
+
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import torchvision.transforms as T
10
+ from ml_dtypes import bfloat16
11
+ from PIL import Image
12
+ from torchvision.transforms.functional import InterpolationMode
13
+ from transformers import AutoConfig, AutoTokenizer
14
+
15
+ from utils.infer_func import InferManager
16
+ from axengine import InferenceSession
17
+
18
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
19
+ IMAGENET_STD = (0.229, 0.224, 0.225)
20
+ IMG_PLACEHOLDER_TOKEN_ID = 151669 # <img>
21
+ IMG_CONTEXT_REPEAT = 256 # number of image context tokens expected by the model
22
+
23
+
24
+ SYSTEM_PROMPT = (
25
+ "<|im_start|>system\n"
26
+ "你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型, 英文名叫 InternVL3, "
27
+ "是一个有用无害的人工智能助手, 擅长思考和回答用户的问题. 请你在回答问题时使用简体中文."
28
+ "<|im_end|>\n"
29
+ )
30
+
31
+
32
+ def build_transform(input_size: int):
33
+ transform = T.Compose([
34
+ T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
35
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
36
+ T.ToTensor(),
37
+ T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
38
+ ])
39
+ return transform
40
+
41
+
42
+ def dynamic_preprocess(image: Image.Image, min_num: int = 1, max_num: int = 12, image_size: int = 448,
43
+ use_thumbnail: bool = False):
44
+ orig_width, orig_height = image.size
45
+ aspect_ratio = orig_width / orig_height
46
+
47
+ target_ratios = set(
48
+ (i, j)
49
+ for n in range(min_num, max_num + 1)
50
+ for i in range(1, n + 1)
51
+ for j in range(1, n + 1)
52
+ if i * j <= max_num and i * j >= min_num
53
+ )
54
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
55
+
56
+ def find_closest_aspect_ratio(ar: float, ratios: List[tuple]):
57
+ best_ratio_diff = float("inf")
58
+ best_ratio = (1, 1)
59
+ area = orig_width * orig_height
60
+ for ratio in ratios:
61
+ target_aspect_ratio = ratio[0] / ratio[1]
62
+ ratio_diff = abs(ar - target_aspect_ratio)
63
+ if ratio_diff < best_ratio_diff:
64
+ best_ratio_diff = ratio_diff
65
+ best_ratio = ratio
66
+ elif ratio_diff == best_ratio_diff:
67
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
68
+ best_ratio = ratio
69
+ return best_ratio
70
+
71
+ target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio, target_ratios)
72
+ target_width = image_size * target_aspect_ratio[0]
73
+ target_height = image_size * target_aspect_ratio[1]
74
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
75
+
76
+ resized_img = image.resize((target_width, target_height))
77
+ processed_images = []
78
+ for i in range(blocks):
79
+ box = (
80
+ (i % (target_width // image_size)) * image_size,
81
+ (i // (target_width // image_size)) * image_size,
82
+ ((i % (target_width // image_size)) + 1) * image_size,
83
+ ((i // (target_width // image_size)) + 1) * image_size,
84
+ )
85
+ split_img = resized_img.crop(box)
86
+ processed_images.append(split_img)
87
+ assert len(processed_images) == blocks
88
+ if use_thumbnail and len(processed_images) != 1:
89
+ processed_images.append(image.resize((image_size, image_size)))
90
+ return processed_images
91
+
92
+
93
+ def load_image(image_file: Image.Image, input_size: int = 448, max_num: int = 12):
94
+ transform = build_transform(input_size=input_size)
95
+ images = dynamic_preprocess(image_file, image_size=input_size, use_thumbnail=True, max_num=max_num)
96
+ pixel_values = [transform(img) for img in images]
97
+ pixel_values = torch.stack(pixel_values)
98
+ return pixel_values
99
+
100
+
101
+ class InternVLGradioDemo:
102
+ def __init__(self, hf_model: str, axmodel_dir: str, vit_axmodel: str, max_seq_len: int = 2047):
103
+ self.hf_model = hf_model
104
+ self.axmodel_dir = axmodel_dir
105
+ self.vit_axmodel = vit_axmodel
106
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
107
+
108
+ self.embeds = np.load(os.path.join(axmodel_dir, "model.embed_tokens.weight.npy"))
109
+ self.tokenizer = AutoTokenizer.from_pretrained(self.hf_model)
110
+ config = AutoConfig.from_pretrained(self.hf_model, trust_remote_code=True)
111
+ self.cfg = config.llm_config
112
+
113
+ self.vit_session = InferenceSession(self.vit_axmodel)
114
+ self.infer_manager = InferManager(self.cfg, self.axmodel_dir, max_seq_len=max_seq_len)
115
+
116
+ def _build_single_turn_prompt(self, user_text: str, vit_features: List[np.ndarray]):
117
+ prompt = SYSTEM_PROMPT
118
+ prompt += f"<|im_start|>user\n{user_text}"
119
+ for _ in vit_features:
120
+ prompt += "\n<img>" + "<IMG_CONTEXT>" * IMG_CONTEXT_REPEAT + "</img>"
121
+ prompt += "<|im_end|>\n<|im_start|>assistant\n"
122
+ return prompt
123
+
124
+ def _insert_vision_features(self, token_ids: List[int], prefill_data: np.ndarray, vit_features: List[np.ndarray]):
125
+ image_start_indices = np.where(np.array(token_ids) == IMG_PLACEHOLDER_TOKEN_ID)[0].tolist()
126
+ if len(image_start_indices) != len(vit_features):
127
+ raise ValueError("图片数量与占位符数量不一致, 请检查输入和模板生成逻辑")
128
+ for idx, image_start_index in enumerate(image_start_indices):
129
+ insert_pos = image_start_index + 1
130
+ prefill_data[insert_pos: insert_pos + IMG_CONTEXT_REPEAT] = vit_features[idx][0, :, :]
131
+ return prefill_data
132
+
133
+ def _run_model(self, prompt: str, vit_features: List[np.ndarray]):
134
+ """Non-streaming推理,保留以防需要一次性结果。"""
135
+ for k_cache in self.infer_manager.k_caches:
136
+ k_cache.fill(0)
137
+ for v_cache in self.infer_manager.v_caches:
138
+ v_cache.fill(0)
139
+
140
+ token_ids = self.tokenizer.encode(prompt)
141
+ prefill_data = np.take(self.embeds, token_ids, axis=0).astype(bfloat16)
142
+ if vit_features:
143
+ prefill_data = self._insert_vision_features(token_ids, prefill_data, vit_features)
144
+
145
+ eos_token_id = None
146
+ if isinstance(self.cfg.eos_token_id, list) and len(self.cfg.eos_token_id) > 1:
147
+ eos_token_id = self.cfg.eos_token_id
148
+
149
+ slice_len = 128
150
+ token_ids = self.infer_manager.prefill(self.tokenizer, token_ids, prefill_data, slice_len=slice_len)
151
+ return self.infer_manager.decode(
152
+ self.tokenizer,
153
+ token_ids,
154
+ self.embeds,
155
+ slice_len=slice_len,
156
+ eos_token_id=eos_token_id,
157
+ stream=False,
158
+ )
159
+
160
+ def _stream_generate(self, prompt: str, vit_features: List[np.ndarray]):
161
+ """流式生成,逐 token 产出累积文本与计时信息 (TTFT 与平均 decode ms/token)。"""
162
+ # reset kv cache per request
163
+ for k_cache in self.infer_manager.k_caches:
164
+ k_cache.fill(0)
165
+ for v_cache in self.infer_manager.v_caches:
166
+ v_cache.fill(0)
167
+
168
+ token_ids = self.tokenizer.encode(prompt)
169
+ prefill_data = np.take(self.embeds, token_ids, axis=0).astype(bfloat16)
170
+ if vit_features:
171
+ prefill_data = self._insert_vision_features(token_ids, prefill_data, vit_features)
172
+
173
+ eos_token_id = None
174
+ if isinstance(self.cfg.eos_token_id, list) and len(self.cfg.eos_token_id) > 1:
175
+ eos_token_id = self.cfg.eos_token_id
176
+
177
+ slice_len = 128
178
+ t_start = time.time()
179
+ token_ids = self.infer_manager.prefill(self.tokenizer, token_ids, prefill_data, slice_len=slice_len)
180
+
181
+ # copy decode逻辑,实现手动流式输出
182
+ mask = np.zeros((1, 1, self.infer_manager.max_seq_len + 1), dtype=np.float32).astype(bfloat16)
183
+ mask[:, :, :self.infer_manager.max_seq_len] -= 65536
184
+ seq_len = len(token_ids) - 1
185
+ if slice_len > 0:
186
+ mask[:, :, :seq_len] = 0
187
+
188
+ ttft_ms: Optional[float] = None
189
+ decode_tokens = 0
190
+ decode_elapsed_ms: float = 0.0
191
+ generated_text = ""
192
+ yield generated_text, ttft_ms, None, None, False
193
+
194
+ for step_idx in range(self.infer_manager.max_seq_len):
195
+ if slice_len > 0 and step_idx < seq_len:
196
+ continue
197
+ cur_token = token_ids[step_idx]
198
+ indices = np.array([step_idx], np.uint32).reshape((1, 1))
199
+ data = self.embeds[cur_token, :].reshape((1, 1, self.cfg.hidden_size)).astype(bfloat16)
200
+ for layer_idx in range(self.cfg.num_hidden_layers):
201
+ input_feed = {
202
+ "K_cache": self.infer_manager.k_caches[layer_idx],
203
+ "V_cache": self.infer_manager.v_caches[layer_idx],
204
+ "indices": indices,
205
+ "input": data,
206
+ "mask": mask,
207
+ }
208
+ outputs = self.infer_manager.decoder_sessions[layer_idx].run(None, input_feed, shape_group=0)
209
+ self.infer_manager.k_caches[layer_idx][:, step_idx, :] = outputs[0][:, :, :]
210
+ self.infer_manager.v_caches[layer_idx][:, step_idx, :] = outputs[1][:, :, :]
211
+ data = outputs[2]
212
+ mask[..., step_idx] = 0
213
+ if step_idx < seq_len - 1:
214
+ continue
215
+ post_out = self.infer_manager.post_process_session.run(None, {"input": data})[0]
216
+ next_token, possible_tokens, possible_probs = self.infer_manager.post_process(post_out, temperature=0.7)
217
+ if eos_token_id is not None and next_token in eos_token_id:
218
+ ttft_ms = ttft_ms or (time.time() - t_start) * 1000
219
+ break
220
+ if next_token == self.tokenizer.eos_token_id:
221
+ ttft_ms = ttft_ms or (time.time() - t_start) * 1000
222
+ break
223
+
224
+ token_ids.append(next_token)
225
+ # 使用完整 token 列表解码,避免多字节 UTF-8 字符被截断显示为乱码
226
+ # 只解码新生成的 tokens(从 seq_len 开始)
227
+ generated_text = self.tokenizer.decode(token_ids[seq_len:], skip_special_tokens=True)
228
+
229
+ if ttft_ms is None:
230
+ ttft_ms = (time.time() - t_start) * 1000
231
+ else:
232
+ decode_tokens += 1
233
+ decode_elapsed_ms = (time.time() - t_start) * 1000 - ttft_ms
234
+
235
+ avg_decode = (decode_elapsed_ms / decode_tokens) if decode_tokens > 0 else None
236
+ yield generated_text, ttft_ms, avg_decode, decode_tokens, False
237
+
238
+ total_ms = (time.time() - t_start) * 1000
239
+ avg_decode = (decode_elapsed_ms / decode_tokens) if decode_tokens > 0 else None
240
+ yield generated_text, ttft_ms, avg_decode, decode_tokens, True
241
+
242
+ def chat(self, user_input: str, image: Optional[Image.Image]) -> Generator:
243
+ user_text = (user_input or "").strip()
244
+ if not user_text and image is None:
245
+ yield [], gr.update(), gr.update(), gr.update(), gr.update()
246
+ return
247
+
248
+ # 先展示占位,保持图片不清空;同时占位速度信息
249
+ yield [(user_text, "处理中…")], gr.update(value=""), gr.update(), gr.update(value="<div style='text-align: right; font-size: 13px; color: #6b7280; font-family: monospace;'>TTFT -- ms&nbsp;&nbsp;|&nbsp;&nbsp;Decode -- ms/token&nbsp;&nbsp;|&nbsp;&nbsp;Tokens --</div>"), gr.update(interactive=False)
250
+
251
+ vit_outputs = []
252
+ if image is not None:
253
+ pixel_values = load_image(image, input_size=448, max_num=1)
254
+ vit_output = self.vit_session.run(None, {"image": pixel_values.numpy()})[0]
255
+ vit_outputs.append(vit_output.copy())
256
+
257
+ prompt = self._build_single_turn_prompt(user_text, vit_outputs)
258
+
259
+ chatbot_history = [(user_text, "")] # 将在流式过程中填充
260
+ for partial, ttft_ms, avg_decode_ms, decode_tokens, finished in self._stream_generate(prompt, vit_outputs):
261
+ chatbot_history[-1] = (user_text, partial)
262
+ ttft_disp = f"{ttft_ms:.0f}" if ttft_ms is not None else "--"
263
+ decode_disp = f"{avg_decode_ms:.1f}" if avg_decode_ms is not None else "--"
264
+ tok_disp = f"{decode_tokens}" if decode_tokens is not None else "--"
265
+ metrics_text = f"<div style='text-align: right; font-size: 13px; color: #6b7280; font-family: monospace;'>TTFT {ttft_disp} ms&nbsp;&nbsp;|&nbsp;&nbsp;Decode {decode_disp} ms/token&nbsp;&nbsp;|&nbsp;&nbsp;Tokens {tok_disp}</div>"
266
+ if finished:
267
+ yield chatbot_history, gr.update(value=""), gr.update(), gr.update(value=metrics_text), gr.update(interactive=True)
268
+ else:
269
+ yield chatbot_history, gr.update(value=""), gr.update(), gr.update(value=metrics_text), gr.update(interactive=False)
270
+
271
+ @staticmethod
272
+ def build_ui(demo: "InternVLGradioDemo", server_name: str = "0.0.0.0", server_port: int = 7860, share: bool = False):
273
+ # 自定义 JavaScript: Enter 发送, Shift+Enter 换行
274
+ custom_js = """
275
+ function() {
276
+ // 等待 DOM 加载完成后绑定事件
277
+ setTimeout(() => {
278
+ const textareas = document.querySelectorAll('#user-input textarea');
279
+ textareas.forEach(textarea => {
280
+ // 移除可能存在的旧监听器
281
+ textarea.removeEventListener('keydown', textarea._customKeyHandler);
282
+
283
+ textarea._customKeyHandler = function(e) {
284
+ if (e.key === 'Enter') {
285
+ if (e.shiftKey) {
286
+ // Shift+Enter: 插入换行符
287
+ e.preventDefault();
288
+ const start = this.selectionStart;
289
+ const end = this.selectionEnd;
290
+ const value = this.value;
291
+ this.value = value.substring(0, start) + '\\n' + value.substring(end);
292
+ this.selectionStart = this.selectionEnd = start + 1;
293
+ // 触发 input 事件让 Gradio 感知变化
294
+ this.dispatchEvent(new Event('input', { bubbles: true }));
295
+ } else {
296
+ // Enter: 发送消息
297
+ e.preventDefault();
298
+ const sendBtn = document.querySelector('#send-btn');
299
+ if (sendBtn) {
300
+ sendBtn.click();
301
+ }
302
+ }
303
+ }
304
+ };
305
+ textarea.addEventListener('keydown', textarea._customKeyHandler);
306
+ });
307
+ }, 500);
308
+ }
309
+ """
310
+
311
+ with gr.Blocks(title="InternVL3-5-1B AX Gradio Demo", theme=gr.themes.Soft(), js=custom_js) as iface:
312
+ gr.HTML("""<style>
313
+ #image-pane img {object-fit: contain; max-height: 380px;}
314
+ #chat-wrap {position: relative;}
315
+ #metrics-display {position: absolute; right: 12px; bottom: 12px; z-index: 5; pointer-events: none; text-align: right;}
316
+ #metrics-display > div {display: inline-block;}
317
+ </style>""")
318
+ gr.Markdown("""### InternVL3-5-1B 图文对话演示\n上传一张图片 (可选),输入问题,获取中文回答。""")
319
+
320
+ with gr.Row():
321
+ # 左侧:对话框和输入区域
322
+ with gr.Column(scale=5):
323
+ with gr.Group(elem_id="chat-wrap"):
324
+ chatbot = gr.Chatbot(height=500, label="对话")
325
+ metrics_md = gr.Markdown("<div style='text-align: right; font-size: 13px; color: #6b7280; font-family: monospace;'>TTFT -- ms&nbsp;&nbsp;|&nbsp;&nbsp;Decode -- ms/token&nbsp;&nbsp;|&nbsp;&nbsp;Tokens --</div>", elem_id="metrics-display")
326
+
327
+ with gr.Row():
328
+ user_input = gr.Textbox(
329
+ placeholder="按 Enter 发送,Shift+Enter 换行",
330
+ lines=2,
331
+ scale=7,
332
+ max_lines=5,
333
+ show_label=False,
334
+ elem_id="user-input",
335
+ )
336
+ with gr.Column(scale=1, min_width=100):
337
+ send_btn = gr.Button("发送", variant="primary", size="sm", elem_id="send-btn")
338
+ clear_btn = gr.Button("清空对话", variant="secondary", size="sm")
339
+
340
+ # 右侧:图像上传和信息提示
341
+ with gr.Column(scale=3):
342
+ image_input = gr.Image(
343
+ type="pil",
344
+ label="上传图片 (可选)",
345
+ height=380,
346
+ image_mode="RGB",
347
+ show_download_button=False,
348
+ elem_id="image-pane",
349
+ )
350
+ gr.Markdown("""- 支持单张图像理解\n- 仅当前问题与回答,不保留历史\n- 处理时间取决于硬件,请耐心等待""")
351
+
352
+ def _clear():
353
+ return [], gr.update(value=""), gr.update(), gr.update(value="<div style='text-align: right; font-size: 13px; color: #6b7280; font-family: monospace;'>TTFT -- ms&nbsp;&nbsp;|&nbsp;&nbsp;Decode -- ms/token&nbsp;&nbsp;|&nbsp;&nbsp;Tokens --</div>"), gr.update(interactive=True)
354
+
355
+ send_btn.click(
356
+ fn=demo.chat,
357
+ inputs=[user_input, image_input],
358
+ outputs=[chatbot, user_input, image_input, metrics_md, send_btn],
359
+ show_progress=False,
360
+ queue=True,
361
+ )
362
+ # 移除 user_input.submit,由自定义 JS 处理 Enter 发送,Shift+Enter 换行
363
+ clear_btn.click(fn=_clear, inputs=None, outputs=[chatbot, user_input, image_input, metrics_md, send_btn])
364
+
365
+ iface.queue().launch(server_name=server_name, server_port=server_port, share=share)
366
+
367
+
368
+ def parse_args():
369
+ parser = argparse.ArgumentParser(description="InternVL3-5-1B AX gradio demo")
370
+ parser.add_argument("--hf_model", type=str, default="./InternVL3_5-1B",
371
+ help="HuggingFace 模型路径")
372
+ parser.add_argument("--axmodel_path", type=str, default="./InternVL3_5-1B_axmodel",
373
+ help="LLM axmodel 目录")
374
+ parser.add_argument("--vit_model", type=str, default="./vit-models/internvl_vit_model_1x3x448x448.axmodel",
375
+ help="ViT axmodel 路径")
376
+ parser.add_argument("--port", type=int, default=7860, help="Gradio 端口")
377
+ parser.add_argument("--host", type=str, default="0.0.0.0", help="Gradio 监听地址")
378
+ parser.add_argument("--share", action="store_true", help="启用 gradio share")
379
+ return parser.parse_args()
380
+
381
+
382
+ def main():
383
+ args = parse_args()
384
+ demo = InternVLGradioDemo(args.hf_model, args.axmodel_path, args.vit_model)
385
+ InternVLGradioDemo.build_ui(demo, server_name=args.host, server_port=args.port, share=args.share)
386
+
387
+
388
+ if __name__ == "__main__":
389
+ main()
infer_axmodel.py ADDED
@@ -0,0 +1,183 @@
1
+ from transformers import AutoProcessor, AutoModelForImageTextToText
2
+ import torch
3
+ import onnx
4
+ import onnxruntime as ort
5
+ import numpy as np
6
+ import os
7
+ from tqdm import tqdm
8
+ from transformers import AutoConfig, AutoTokenizer
9
+ from typing import List, Tuple
10
+ from axengine import InferenceSession
11
+ from ml_dtypes import bfloat16
12
+ from utils.infer_func import InferManager
13
+ import argparse
14
+ from PIL import Image
15
+ import torchvision.transforms as T
16
+ from torchvision.transforms.functional import InterpolationMode
17
+
18
+
19
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
20
+ IMAGENET_STD = (0.229, 0.224, 0.225)
21
+
22
+ def build_transform(input_size):
23
+ MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
24
+ transform = T.Compose([
25
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
26
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
27
+ T.ToTensor(),
28
+ T.Normalize(mean=MEAN, std=STD)
29
+ ])
30
+ return transform
31
+
32
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
33
+ best_ratio_diff = float('inf')
34
+ best_ratio = (1, 1)
35
+ area = width * height
36
+ for ratio in target_ratios:
37
+ target_aspect_ratio = ratio[0] / ratio[1]
38
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
39
+ if ratio_diff < best_ratio_diff:
40
+ best_ratio_diff = ratio_diff
41
+ best_ratio = ratio
42
+ elif ratio_diff == best_ratio_diff:
43
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
44
+ best_ratio = ratio
45
+ return best_ratio
46
+
47
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
48
+ orig_width, orig_height = image.size
49
+ aspect_ratio = orig_width / orig_height
50
+
51
+ # calculate the existing image aspect ratio
52
+ target_ratios = set(
53
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
54
+ i * j <= max_num and i * j >= min_num)
55
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
56
+
57
+ # find the closest aspect ratio to the target
58
+ target_aspect_ratio = find_closest_aspect_ratio(
59
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
60
+
61
+ # calculate the target width and height
62
+ target_width = image_size * target_aspect_ratio[0]
63
+ target_height = image_size * target_aspect_ratio[1]
64
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
65
+
66
+ # resize the image
67
+ resized_img = image.resize((target_width, target_height))
68
+ processed_images = []
69
+ for i in range(blocks):
70
+ box = (
71
+ (i % (target_width // image_size)) * image_size,
72
+ (i // (target_width // image_size)) * image_size,
73
+ ((i % (target_width // image_size)) + 1) * image_size,
74
+ ((i // (target_width // image_size)) + 1) * image_size
75
+ )
76
+ # split the image
77
+ split_img = resized_img.crop(box)
78
+ processed_images.append(split_img)
79
+ assert len(processed_images) == blocks
80
+ if use_thumbnail and len(processed_images) != 1:
81
+ thumbnail_img = image.resize((image_size, image_size))
82
+ processed_images.append(thumbnail_img)
83
+ return processed_images
84
+
85
+ def load_image(image_file, input_size=448, max_num=12):
86
+ image = Image.open(image_file).convert('RGB')
87
+ transform = build_transform(input_size=input_size)
88
+ images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
89
+ pixel_values = [transform(image) for image in images]
90
+ pixel_values = torch.stack(pixel_values)
91
+ return pixel_values
92
+
93
+ if __name__ == "__main__":
94
+
95
+ """
96
+ python3 infer_axmodel.py --vit_model vit-models/internvl_vit_model_1x3x448x448.axmodel --images examples/image_0.jpg
97
+ """
98
+ prompt = None
99
+ parser = argparse.ArgumentParser(description="Model configuration parameters")
100
+ parser.add_argument("--hf_model", type=str, default="./InternVL3_5-1B",
101
+ help="Path to HuggingFace model")
102
+ parser.add_argument("--axmodel_path", type=str, default="./InternVL3_5-1B_axmodel",
103
+ help="Path to save compiled axmodel of llama model")
104
+ parser.add_argument("--vit_model", type=str, default=None, help="Path to save compiled axmodel of llama model")
105
+ parser.add_argument("-i", "--images", nargs='+', type=str, default=None,
106
+ help="Path to the test image.")
107
+ parser.add_argument("-q", "--question", type=str, default="请你描述这幅图的内容.",
108
+ help="Your question that you want to ask the model.")
109
+ args = parser.parse_args()
110
+
111
+ hf_model_path = args.hf_model
112
+ axmodel_path = args.axmodel_path
113
+ images = args.images
114
+ prompt = args.question
115
+
116
+ device = "cuda" if torch.cuda.is_available() else "cpu"
117
+ embeds = np.load(os.path.join(axmodel_path, "model.embed_tokens.weight.npy"))
118
+
119
+ # load the tokenizer and the model
120
+ tokenizer = AutoTokenizer.from_pretrained(hf_model_path)
121
+ config = AutoConfig.from_pretrained(hf_model_path, trust_remote_code=True)
122
+
123
+ # model = AutoModelForCausalLM.from_pretrained(
124
+ # hf_model_path,
125
+ # ).to(device)
126
+
127
+ test_imgs_path = args.images
128
+ vit_axmodel_path = args.vit_model
129
+
130
+ # set the max number of tiles in `max_num`
131
+ pixel_values_list = []
132
+ if test_imgs_path is not None:
133
+ for img_path in test_imgs_path:
134
+ pixel_values = load_image(img_path, input_size=448, max_num=1)
135
+ pixel_values_list.append(pixel_values)
136
+ print(f"输入图像数: {len(pixel_values_list)}")
137
+ print("preprocess image done!")
138
+
139
+ # extract img feature by vit
140
+ vit_session = InferenceSession(vit_axmodel_path)
141
+ vit_output_list = []
142
+ for idx, pixel_values in enumerate(pixel_values_list):
143
+ vit_output = vit_session.run(None, {"image": pixel_values.numpy()})[0]
144
+ vit_output_list.append(vit_output.copy()) # 避免 vit 输出结果使用同一块内存
145
+
146
+ print(f"vit_output.shape is {vit_output_list[0].shape}, vit feature extract done!")
147
+
148
+ prompt = "<|im_start|>system\n你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型, 英文名叫 InternVL3, 是一个有用无害的人工智能助手, 擅长思考和回答用户的问题. 请你在回答问题时使用简体中文.<|im_end|>\n"
149
+ question = args.question
150
+ prompt += "<|im_start|>user\n" + question
151
+
152
+ if len(pixel_values_list) > 0:
153
+ for idx in range(len(pixel_values_list)):
154
+ prompt += "\n<img>" + "<IMG_CONTEXT>" * 256 + "</img>\n"
155
+ prompt += "<|im_end|>\n<|im_start|>assistant\n"
156
+ print(f"prompt is {prompt}")
157
+ token_ids = tokenizer.encode(prompt)
158
+ # 图像理解
159
+ image_start_indices = np.where(np.array(token_ids) == 151669)[0].tolist() # <img> tag 151669, 151665
160
+ prefill_data = np.take(embeds, token_ids, axis=0)
161
+ prefill_data = prefill_data.astype(bfloat16)
162
+ token_len = len(token_ids)
163
+
164
+ for idx, image_start_index in enumerate(image_start_indices):
165
+ image_insert_index = image_start_index + 1
166
+ prefill_data[image_insert_index : image_insert_index + 256] = vit_output_list[idx][0, :, :]
167
+ ##################################
168
+
169
+ cfg = config.llm_config
170
+
171
+ eos_token_id = None
172
+ if isinstance(cfg.eos_token_id, list) and len(cfg.eos_token_id) > 1:
173
+ eos_token_id = cfg.eos_token_id
174
+
175
+ slice_len = 128
176
+ prefill_max_len = 1024 - 1
177
+ max_seq_len = 2048 - 1 # prefill + decode max length
178
+
179
+ imer = InferManager(cfg, axmodel_path, max_seq_len=max_seq_len) # prefill + decode max length
180
+ # import pdb; pdb.set_trace()
181
+ token_ids = imer.prefill(tokenizer, token_ids, prefill_data, slice_len=slice_len)
182
+ imer.decode(tokenizer, token_ids, embeds, slice_len=slice_len, eos_token_id=eos_token_id)
183
+ print("\n")
infer_torch.py ADDED
@@ -0,0 +1,212 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ import torchvision.transforms as T
5
+ from decord import VideoReader, cpu
6
+ from PIL import Image
7
+ from torchvision.transforms.functional import InterpolationMode
8
+ from transformers import AutoModel, AutoTokenizer
9
+
10
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
11
+ IMAGENET_STD = (0.229, 0.224, 0.225)
12
+
13
+ def build_transform(input_size):
14
+ MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
15
+ transform = T.Compose([
16
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
17
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
18
+ T.ToTensor(),
19
+ T.Normalize(mean=MEAN, std=STD)
20
+ ])
21
+ return transform
22
+
23
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
24
+ best_ratio_diff = float('inf')
25
+ best_ratio = (1, 1)
26
+ area = width * height
27
+ for ratio in target_ratios:
28
+ target_aspect_ratio = ratio[0] / ratio[1]
29
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
30
+ if ratio_diff < best_ratio_diff:
31
+ best_ratio_diff = ratio_diff
32
+ best_ratio = ratio
33
+ elif ratio_diff == best_ratio_diff:
34
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
35
+ best_ratio = ratio
36
+ return best_ratio
37
+
38
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
39
+ orig_width, orig_height = image.size
40
+ aspect_ratio = orig_width / orig_height
41
+
42
+ # calculate the existing image aspect ratio
43
+ target_ratios = set(
44
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
45
+ i * j <= max_num and i * j >= min_num)
46
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
47
+
48
+ # find the closest aspect ratio to the target
49
+ target_aspect_ratio = find_closest_aspect_ratio(
50
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
51
+
52
+ # calculate the target width and height
53
+ target_width = image_size * target_aspect_ratio[0]
54
+ target_height = image_size * target_aspect_ratio[1]
55
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
56
+
57
+ # resize the image
58
+ resized_img = image.resize((target_width, target_height))
59
+ processed_images = []
60
+ for i in range(blocks):
61
+ box = (
62
+ (i % (target_width // image_size)) * image_size,
63
+ (i // (target_width // image_size)) * image_size,
64
+ ((i % (target_width // image_size)) + 1) * image_size,
65
+ ((i // (target_width // image_size)) + 1) * image_size
66
+ )
67
+ # split the image
68
+ split_img = resized_img.crop(box)
69
+ processed_images.append(split_img)
70
+ assert len(processed_images) == blocks
71
+ if use_thumbnail and len(processed_images) != 1:
72
+ thumbnail_img = image.resize((image_size, image_size))
73
+ processed_images.append(thumbnail_img)
74
+ return processed_images
75
+
76
+ def load_image(image_file, input_size=448, max_num=12):
77
+ image = Image.open(image_file).convert('RGB')
78
+ transform = build_transform(input_size=input_size)
79
+ images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
80
+ pixel_values = [transform(image) for image in images]
81
+ pixel_values = torch.stack(pixel_values)
82
+ return pixel_values
83
+
84
+ path = './InternVL3_5-1B'
85
+ model = AutoModel.from_pretrained(
86
+ path,
87
+ torch_dtype=torch.bfloat16,
88
+ load_in_8bit=False,
89
+ low_cpu_mem_usage=True,
90
+ use_flash_attn=True,
91
+ trust_remote_code=True,
92
+ device_map="auto").eval()
93
+ tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
94
+
95
+ # set the max number of tiles in `max_num`
96
+ pixel_values = load_image('./examples/image_1.jpg', input_size=448, max_num=1).to(torch.bfloat16).cuda()
97
+ generation_config = dict(max_new_tokens=1024, do_sample=True)
98
+
99
+ # pure-text conversation (纯文本对话)
100
+ question = '中国的首都'
101
+ response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
102
+ print(f'User: {question}\nAssistant: {response}')
103
+
104
+
105
+ # single-image single-round conversation (单图单轮对话)
106
+ question = '<image>\n请你描述这幅图的内容.'
107
+ response = model.chat(tokenizer, pixel_values, question, generation_config)
108
+ print(f'User: {question}\nAssistant: {response}')
109
+
110
+ # # single-image multi-round conversation (单图多轮对话)
111
+ # question = '<image>\nPlease describe the image in detail.'
112
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
113
+ # print(f'User: {question}\nAssistant: {response}')
114
+
115
+ # question = 'Please write a poem according to the image.'
116
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
117
+ # print(f'User: {question}\nAssistant: {response}')
118
+
119
+ # # multi-image multi-round conversation, combined images (多图多轮对话,拼接图像)
120
+ # pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
121
+ # pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
122
+ # pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
123
+
124
+ # question = '<image>\nDescribe the two images in detail.'
125
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
126
+ # history=None, return_history=True)
127
+ # print(f'User: {question}\nAssistant: {response}')
128
+
129
+ # question = 'What are the similarities and differences between these two images.'
130
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
131
+ # history=history, return_history=True)
132
+ # print(f'User: {question}\nAssistant: {response}')
133
+
134
+ # # multi-image multi-round conversation, separate images (多图多轮对话,独立图像)
135
+ # pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
136
+ # pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
137
+ # pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
138
+ # num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
139
+
140
+ # question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
141
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
142
+ # num_patches_list=num_patches_list,
143
+ # history=None, return_history=True)
144
+ # print(f'User: {question}\nAssistant: {response}')
145
+
146
+ # question = 'What are the similarities and differences between these two images.'
147
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
148
+ # num_patches_list=num_patches_list,
149
+ # history=history, return_history=True)
150
+ # print(f'User: {question}\nAssistant: {response}')
151
+
152
+ # # batch inference, single image per sample (单图批处理)
153
+ # pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
154
+ # pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
155
+ # num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
156
+ # pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
157
+
158
+ # questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
159
+ # responses = model.batch_chat(tokenizer, pixel_values,
160
+ # num_patches_list=num_patches_list,
161
+ # questions=questions,
162
+ # generation_config=generation_config)
163
+ # for question, response in zip(questions, responses):
164
+ # print(f'User: {question}\nAssistant: {response}')
165
+
166
+ # # video multi-round conversation (视频多轮对话)
167
+ # def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
168
+ # if bound:
169
+ # start, end = bound[0], bound[1]
170
+ # else:
171
+ # start, end = -100000, 100000
172
+ # start_idx = max(first_idx, round(start * fps))
173
+ # end_idx = min(round(end * fps), max_frame)
174
+ # seg_size = float(end_idx - start_idx) / num_segments
175
+ # frame_indices = np.array([
176
+ # int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
177
+ # for idx in range(num_segments)
178
+ # ])
179
+ # return frame_indices
180
+
181
+ # def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
182
+ # vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
183
+ # max_frame = len(vr) - 1
184
+ # fps = float(vr.get_avg_fps())
185
+
186
+ # pixel_values_list, num_patches_list = [], []
187
+ # transform = build_transform(input_size=input_size)
188
+ # frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
189
+ # for frame_index in frame_indices:
190
+ # img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
191
+ # img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
192
+ # pixel_values = [transform(tile) for tile in img]
193
+ # pixel_values = torch.stack(pixel_values)
194
+ # num_patches_list.append(pixel_values.shape[0])
195
+ # pixel_values_list.append(pixel_values)
196
+ # pixel_values = torch.cat(pixel_values_list)
197
+ # return pixel_values, num_patches_list
198
+
199
+ # video_path = './examples/red-panda.mp4'
200
+ # pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
201
+ # pixel_values = pixel_values.to(torch.bfloat16).cuda()
202
+ # video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
203
+ # question = video_prefix + 'What is the red panda doing?'
204
+ # # Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
205
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
206
+ # num_patches_list=num_patches_list, history=None, return_history=True)
207
+ # print(f'User: {question}\nAssistant: {response}')
208
+
209
+ # question = 'Describe this video in detail.'
210
+ # response, history = model.chat(tokenizer, pixel_values, question, generation_config,
211
+ # num_patches_list=num_patches_list, history=history, return_history=True)
212
+ # print(f'User: {question}\nAssistant: {response}')
internvl3-5_axmodel/model.embed_tokens.weight.bfloat16.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9458f8d6cac0fb032e0d9aa5e237148a2ee2e6f9c98ebf7584b065e174127009
3
+ size 311164928
internvl3-5_axmodel/model.embed_tokens.weight.float32.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e69f2b742ddc9d789d16f46073fbe8fdfce77222958071fabf4ff708af07bf23
3
+ size 622329856
internvl3-5_axmodel/model.embed_tokens.weight.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e826d74d8269b0085d3fdd397e7fa6303ec26719c76f4d5b1d029fab63fcf50
3
+ size 622329984
internvl3-5_axmodel/qwen3_p128_l0_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f12d7e947b42e933d6c3119e08d6be0ecb703df7746839cb91a0452d12c490f0
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l10_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:452cc345d0b182b95f3e961ac9faa1767563f8324fc3d3ed3372f5b63851059b
3
+ size 21569348
internvl3-5_axmodel/qwen3_p128_l11_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9f031d1f7e7713d4f0aee4a5fc1a3beffd4593eae922e81946537eca946b04f
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l12_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3295106128b7d41ae737274abc58a9b9e49fc1d56df904eb8b44d08b2a13462b
3
+ size 21569220
internvl3-5_axmodel/qwen3_p128_l13_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b192de65f95a19271ddcf91eca383c782ecd23fb6be2fdb47369ec76d480e21
3
+ size 21569156
internvl3-5_axmodel/qwen3_p128_l14_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cd05f0f6d2b2f6b07c74cece9dfd09a86a5e2a0d1b941b6a01baf31a19edc53
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l15_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:205d2b186e641bd8efb5cbcba1422d7f47384fc2e308ea791f79e0cffb002d48
3
+ size 21568836
internvl3-5_axmodel/qwen3_p128_l16_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ab96615046ebf359d86fbab9c45af036fb79bcd8e4bbf2d9cab63fef8b0ef47
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l17_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01b2569ba99a274fee59ef56ea4ba0e4109971d196240b254a68e4e0f0808ac2
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l18_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6eb521e5c3e03f3b96b6370362234e741910cf82c038984081e9ab95a101c31d
3
+ size 21568900
internvl3-5_axmodel/qwen3_p128_l19_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:821a3937c506eaa268440ee116961ef2ceb17b5362661d3c9f41997a7aacf54f
3
+ size 21568740
internvl3-5_axmodel/qwen3_p128_l1_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ba82686a72bb26759fbf96c7544277d5547ec23634f4128579bd53464072432
3
+ size 21568932
internvl3-5_axmodel/qwen3_p128_l20_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e70045401bd0c6748558ec0374d11cb381644109b687c0358fb8cec5e88de1c
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l21_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35ecb18c9bd8d79fe019df779b5f7212f0af1b742adbd69b9968f9e93ec83a83
3
+ size 21568772
internvl3-5_axmodel/qwen3_p128_l22_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5037445038d24a9a44c6defc22638f53ef9875ca62e9ea836077d1518c28d8d
3
+ size 21568964
internvl3-5_axmodel/qwen3_p128_l23_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:510b76060ef414a7100989e631d6d8336ebf5573467eceb9cf5ad0641df15119
3
+ size 21568644
internvl3-5_axmodel/qwen3_p128_l24_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87b6fed1cb82fee89097d326ddfd414e41f0d9bdc27087b630dc678235085a4c
3
+ size 21569508
internvl3-5_axmodel/qwen3_p128_l25_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86f1c7b43867ca7c38b942d78b81eed377e61123c3ec1b954bc309c71793f4aa
3
+ size 21568964
internvl3-5_axmodel/qwen3_p128_l26_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7726bb67ea16ba081e73b9982c464cbd7317de09b7c1e34c58072b7a55a1cb8
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l27_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa0888376b5c59ee87687961078bce26e9dc016a8207838659dabbaa400ce4f3
3
+ size 21570244
internvl3-5_axmodel/qwen3_p128_l2_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59ed31ea8c87f7ea9d4e31b4911ad4be22d3b917b9ce4a8723ea11eaf53f2aca
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l3_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:681988b5ae14445af3219f465b53dea50590bd755b91aeaee83646401b08d058
3
+ size 21568708
internvl3-5_axmodel/qwen3_p128_l4_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10e4d21c3ee0a63b2e3d4728a784885611105c796bc400d0f4403cc68e5113df
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l5_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f15c93c5e243ff7a55e96dc1dd38a47747164c1ef4ef7a9ba3acc6846fe93e3c
3
+ size 21568996
internvl3-5_axmodel/qwen3_p128_l6_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88dddad7925e2c455cb8ee7ed23de8743efd51378aa4a16fae11049649841981
3
+ size 21568292
internvl3-5_axmodel/qwen3_p128_l7_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2fb8302f98ec47f75e0d0608af30df1bdaebdad870d3412ea4cdf4f9a9a0632
3
+ size 21569988
internvl3-5_axmodel/qwen3_p128_l8_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ff0ddf70c1cefe35bae6e06dbcda6e1714a3077255085db50a7184314baccdc
3
+ size 21569060
internvl3-5_axmodel/qwen3_p128_l9_together.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2df43e2bf7490043031395eedfd7c099b420ed4bd752fe2d8ef7b3c60489f952
3
+ size 21568612
internvl3-5_axmodel/qwen3_post.axmodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0475e343fc60202f8ae651fd3a4ff574e31a61563bca4840aacd3f59539307e
3
+ size 170001027
internvl3-5_tokenizer/.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
internvl3-5_tokenizer/README.md ADDED
@@ -0,0 +1,830 @@
1
+ ---
2
+ license: apache-2.0
3
+ pipeline_tag: image-text-to-text
4
+ library_name: transformers
5
+ base_model:
6
+ - OpenGVLab/InternVL3_5-1B-MPO
7
+ base_model_relation: finetune
8
+ datasets:
9
+ - OpenGVLab/MMPR-v1.2
10
+ language:
11
+ - multilingual
12
+ tags:
13
+ - internvl
14
+ - custom_code
15
+ ---
16
+
17
+ # InternVL3_5-1B
18
+
19
+ [\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238) [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271) [\[📜 InternVL2.5-MPO\]](https://huggingface.co/papers/2411.10442) [\[📜 InternVL3\]](https://huggingface.co/papers/2504.10479) [\[📜 InternVL3.5\]](https://huggingface.co/papers/2508.18265)
20
+
21
+ [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[🗨️ Chat Demo\]](https://chat.intern-ai.org.cn/) [\[🚀 Quick Start\]](#quick-start) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
22
+
23
+ <div align="center">
24
+ <img width="500" alt="image" src="https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/zJsd2hqd3EevgXo6fNgC-.png">
25
+ </div>
26
+
27
+ ## Introduction
28
+
29
+ We introduce *InternVL3.5*, a new family of open-source multimodal models that significantly advances versatility, reasoning capability, and inference efficiency along the InternVL series. A key innovation is the *Cascade Reinforcement Learning (Cascade RL)* framework, which enhances reasoning through a two-stage process: offline RL for stable convergence and online RL for refined alignment. This coarse-to-fine training strategy leads to substantial improvements on downstream reasoning tasks, e.g., MMMU and MathVista. To optimize efficiency, we propose a *Visual Resolution Router (ViR)* that dynamically adjusts the resolution of visual tokens without compromising performance. Coupled with ViR, our Decoupled *Vision-Language Deployment (DvD)* strategy separates the vision encoder and language model across different GPUs, effectively balancing computational load. These contributions collectively enable InternVL3.5 to achieve up to a +16.0\% gain in overall reasoning performance and a 4.05 \\(\times\\) inference speedup compared to its predecessor, i.e., InternVL3. In addition, InternVL3.5 supports novel capabilities such as GUI interaction and embodied agency. Notably, our largest model, i.e., InternVL3.5-241B-A28B, attains state-of-the-art results among open-source MLLMs across general multimodal, reasoning, text, and agentic tasks—narrowing the performance gap with leading commercial models like GPT-5. All models and code are publicly released.
30
+
31
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance.jpg)
32
+
33
+ > Hatched bars represent closed-source commercial models. We report average scores on a set of multimodal general, reasoning, text, and agentic benchmarks: MMBench v1.1 (en), MMStar, BLINK, HallusionBench, AI2D, OCRBench, MMVet, MME-RealWorld (en), MVBench, VideoMME, MMMU, MathVista, MathVision, MathVerse, DynaMath, WeMath, LogicVista, MATH500, AIME24, AIME25, GPQA, MMLU-Pro, GAOKAO, IFEval, SGP-Bench, VSI-Bench, ERQA, SpaCE-10, and OmniSpatial.
34
+
35
+ See [quick start](#quick-start) for how to use our model.
36
+
37
+ ## InternVL3.5 Family
38
+
39
+ In the following table, we provide an overview of the InternVL3.5 series.
40
+ To maintain consistency with earlier generations, we provide two model formats: [the GitHub format](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B), consistent with prior releases, and [the HF format](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B-HF), aligned with the official Transformers standard.
41
+
42
+ > If you want to convert the checkpoint between these two formats, please refer to the scripts about [custom2hf](https://github.com/OpenGVLab/InternVL/blob/main/internvl_chat/tools/internvl_custom2hf.py) and [hf2custom](https://github.com/OpenGVLab/InternVL/blob/main/internvl_chat/tools/internvl_hf2custom.py).
43
+
44
+
45
+ | Model | #Vision Param | #Language Param | #Total Param | HF Link | ModelScope Link |
46
+ | --------------------- | ------------- | --------------- | ------------ | ------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------- |
47
+ | InternVL3.5-1B | 0.3B | 0.8B | 1.1B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-1B) |
48
+ | InternVL3.5-2B | 0.3B | 2.0B | 2.3B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-2B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-2B) |
49
+ | InternVL3.5-4B | 0.3B | 4.4B | 4.7B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-4B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-4B) |
50
+ | InternVL3.5-8B | 0.3B | 8.2B | 8.5B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-8B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-8B) |
51
+ | InternVL3.5-14B | 0.3B | 14.8B | 15.1B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-14B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-14B) |
52
+ | InternVL3.5-38B | 5.5B | 32.8B | 38.4B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-38B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-38B) |
53
+ | InternVL3.5-20B-A4B | 0.3B | 20.9B | 21.2B-A4B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview) |
54
+ | InternVL3.5-30B-A3B | 0.3B | 30.5B | 30.8B-A3B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-30B-A3B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-30B-A3B) |
55
+ | InternVL3.5-241B-A28B | 5.5B | 235.1B | 240.7B-A29B | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-241B-A28B) |
56
+
57
+
58
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_overall.jpg)
59
+
60
+ > We conduct the evaluation with [VLMEvalkit](https://github.com/open-compass/VLMEvalKit). ***To enable the Thinking mode of our model, please set the system prompt to [R1_SYSTEM_PROMPT](https://github.com/open-compass/VLMEvalKit/blob/main/vlmeval/vlm/internvl/internvl_chat.py#L38).*** When enabling Thinking mode, we recommend setting `do_sample=True` and `temperature=0.6` to mitigate undesired repetition.
61
+
62
+ Our training pipeline comprises four stages: Multimodal Continual Pre-Training (**CPT**), Supervised Fine-Tuning (**SFT**), and the two-phase Cascade Reinforcement Learning (**CascadeRL**). In CascadeRL, we first fine-tune the model using Mixed Preference Optimization (**MPO**) under an offline RL setting, followed by **GSPO** under an online RL setting.
63
+ For the Flash version of InternVL3.5, we additionally introduce a lightweight training stage, termed Visual Consistency Learning (**ViCO**), which reduces the token cost required to represent an image patch.
64
+
65
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/training_pipeline.jpg)
66
+
67
+ Here, we also open-source the model weights after different training stages for potential research usage.
68
+ ***If you're unsure which version to use, please select the one without any suffix, as it has completed the full training pipeline.***
69
+
70
+
71
+ | Model | Training Pipeline | HF Link | ModelScope Link |
72
+ | -------------------------------- | --------------------- | --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- |
73
+ | InternVL3.5-1B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-1B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-1B-Pretrained) |
74
+ | InternVL3.5-1B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-1B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-1B-Instruct) |
75
+ | InternVL3.5-1B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-1B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-1B-MPO) |
76
+ | InternVL3.5-1B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-1B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-1B) |
77
+ | InternVL3.5-2B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-2B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-2B-Pretrained) |
78
+ | InternVL3.5-2B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-2B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-2B-Instruct) |
79
+ | InternVL3.5-2B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-2B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-2B-MPO) |
80
+ | InternVL3.5-2B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-2B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-2B) |
81
+ | InternVL3.5-4B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-4B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-4B-Pretrained) |
82
+ | InternVL3.5-4B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-4B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-4B-Instruct) |
83
+ | InternVL3.5-4B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-4B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-4B-MPO) |
84
+ | InternVL3.5-4B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-4B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-4B) |
85
+ | InternVL3.5-8B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-8B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-8B-Pretrained) |
86
+ | InternVL3.5-8B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-8B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-8B-Instruct) |
87
+ | InternVL3.5-8B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-8B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-8B-MPO) |
88
+ | InternVL3.5-8B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-8B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-8B) |
89
+ | InternVL3.5-14B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-14B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-14B-Pretrained) |
90
+ | InternVL3.5-14B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-14B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-14B-Instruct) |
91
+ | InternVL3.5-14B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-14B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-14B-MPO) |
92
+ | InternVL3.5-14B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-14B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-14B) |
93
+ | InternVL3.5-30B-A3B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-30B-A3B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-30B-A3B-Pretrained) |
94
+ | InternVL3.5-30B-A3B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-30B-A3B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-30B-A3B-Instruct) |
95
+ | InternVL3.5-30B-A3B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-30B-A3B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-30B-A3B-MPO) |
96
+ | InternVL3.5-30B-A3B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-30B-A3B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-30B-A3B) |
97
+ | InternVL3.5-38B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-38B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-38B-Pretrained) |
98
+ | InternVL3.5-38B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-38B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-38B-Instruct) |
99
+ | InternVL3.5-38B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-38B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-38B-MPO) |
100
+ | InternVL3.5-38B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-38B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-38B) |
101
+ | InternVL3.5-241B-A28B-Pretrained | CPT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B-Pretrained) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-241B-A28B-Pretrained) |
102
+ | InternVL3.5-241B-A28B-Instruct | CPT + SFT | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B-Instruct) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-241B-A28B-Instruct) |
103
+ | InternVL3.5-241B-A28B-MPO | CPT + SFT + MPO | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B-MPO) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-241B-A28B-MPO) |
104
+ | InternVL3.5-241B-A28B | CPT + SFT + CascadeRL | [🤗 link](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B) | [🤖 link](https://www.modelscope.cn/models/OpenGVLab/InternVL3_5-241B-A28B) |
105
+
106
+
107
+ The Flash version of our model will be released as soon as possible.
108
+
109
+
110
+
111
+ ## Model Architecture
112
+
113
+ `InternVL3.5`:
114
+ This series of models follows the "ViT–MLP–LLM" paradigm adopted in previous versions of InternVL.
115
+ We initialize the language model using the Qwen3 series and GPT-OSS, and the vision encoder using InternViT-300M and InternViT-6B.
116
+ The Dynamic High Resolution strategy introduced in InternVL1.5 is also retained in our design.
117
+
118
+
119
+ `InternVL3.5-Flash`:
120
+ Compared to InternVL3.5, InternVL3.5-Flash further integrates the *Visual Resolution Router (ViR)*, thus yielding a series of efficient variants suitable for resource-constrained scenarios.
121
+ Specifically, in InternVL3.5, each image patch is initially represented as 1024 visual tokens for the vision encoder, which are then compressed into 256 tokens via a pixel shuffle module before being passed to the Large Language Model (LLM).
122
+ In InternVL3.5-Flash, as shown in the Figure below, an additional pixel shuffle module with a higher compression rate is included, enabling the compression of visual tokens down to 64 tokens.
123
+ For each patch, the patch router determines the appropriate compression rate by assessing its semantic richness, and routes it to the corresponding pixel shuffle module accordingly.
124
+ Benefiting from this patch-aware compression mechanism, InternVL3.5-Flash is able to reduce the number of visual tokens by 50\% while maintaining nearly 100\% of the performance of InternVL3.5.
125
+
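+ As a rough sketch of this pixel-shuffle compression (not the actual InternVL implementation), merging each 2×2 neighborhood of visual tokens reduces the 1024 tokens produced for a patch to 256, and applying a higher compression rate in the same way yields 64; the shapes below are illustrative assumptions.
+
+ ```python
+ import torch
+
+ def pixel_shuffle_compress(x, factor=2):
+     """x: [B, H, W, C] grid of visual tokens (e.g. a 32x32 grid = 1024 tokens).
+     Merges each factor-by-factor neighborhood into a single token with
+     factor^2 * C channels, so the token count drops by factor^2."""
+     b, h, w, c = x.shape
+     x = x.view(b, h // factor, factor, w // factor, factor, c)
+     x = x.permute(0, 1, 3, 2, 4, 5).reshape(
+         b, (h // factor) * (w // factor), factor * factor * c)
+     return x
+
+ tokens = torch.randn(1, 32, 32, 1024)        # 1024 visual tokens per image patch
+ print(pixel_shuffle_compress(tokens).shape)  # -> torch.Size([1, 256, 4096])
+ ```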
126
+
127
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/architecture.jpg)
128
+
129
+ ## Training and Deployment Strategy
130
+
131
+ ### Pre-Training
132
+
133
+ During the pre-training stage, we update all model parameters jointly using the combination of large-scale text and multimodal corpora. Specifically, given an arbitrary training sample consisting of a multimodal token sequence \\(\mathbf{x}=\left(x_1, x_2, \ldots, x_L\right)\\), the next token prediction (NTP) loss is calculated on each text token as follows:
134
+
135
+ $$
136
+ \mathcal{L}_{i}=-\log p_\theta\left(x_i \mid x_1, \ldots, x_{i-1}\right),
137
+ $$
138
+
139
+ where \\(x_i\\) is the predicted token and prefix tokens in \\(\{x_1, x_2, \ldots, x_{i-1}\}\\) can be either text tokens or image tokens. Notably, for conversation samples, only response tokens are included for the calculation of the loss.
140
+ Additionally, to mitigate bias toward either longer or shorter responses during training, we adopt square-root averaging to re-weight the NTP loss as follows:
141
+
142
+ $$
143
+ \mathcal{L}_{i}^{'} = \frac{w_i}{\sum_j w_j} \cdot \mathcal{L}_i, \quad w_i = \frac{1}{N^{0.5}},
144
+ $$
145
+
146
+ where \\(N\\) denotes the number of tokens in the training sample on which the loss is calculated. Random JPEG compression is also applied to enhance the model's real-world performance.
147
+
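+ As a minimal sketch (not the actual training code), the re-weighting above can be applied to per-token NTP losses as follows; the tensor names, shapes, and masking convention are assumptions for illustration.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def reweighted_ntp_loss(logits, labels, ignore_index=-100):
+     """logits: [B, L, V] next-token logits; labels: [B, L] target ids with
+     unsupervised positions (e.g. prompt tokens) set to `ignore_index`."""
+     per_token = F.cross_entropy(
+         logits.flatten(0, 1), labels.flatten(),
+         reduction='none', ignore_index=ignore_index).view_as(labels).float()
+     mask = (labels != ignore_index).float()
+     n = mask.sum(dim=1, keepdim=True).clamp(min=1)      # N supervised tokens per sample
+     weights = mask / n.sqrt()                           # w_i = 1 / N^{0.5}
+     weights = weights / weights.sum().clamp(min=1e-8)   # normalize: w_i / sum_j w_j
+     return (weights * per_token).sum()
+ ```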
148
+ ### Supervised Fine-Tuning
149
+
150
+ During the SFT phase, we adopt the same objective as in the pre-training stage and use the square-root averaging strategy to calculate the final loss. In this stage, the context window is set to 32K tokens to accommodate long-context information.
151
+ Compared to InternVL3, the SFT stage of InternVL3.5 contains more high-quality and diverse training data derived from three sources:
152
+
153
+ (1) Instruction-following data from InternVL3, which are reused to preserve broad coverage of vision–language tasks.
154
+
155
+ (2) Multimodal reasoning data in the "Thinking" mode, which are included to instill long-thinking capabilities in the model. To construct such data, we first use InternVL3-78B to describe the image and then input the description into DeepSeek-R1 to sample rollouts with detailed reasoning processes. Rollouts with an incorrect final answer are filtered out. The questions in these datasets cover various expert domains, such as mathematics and scientific disciplines, thereby strengthening performance on different reasoning tasks.
156
+
157
+ (3) Capability-expansion datasets, which endow InternVL3.5 with new skills, including GUI-based interaction, embodied interaction, and scalable vector graphics (SVG) understanding and generation.
158
+
159
+ ### Cascade Reinforcement Learning
160
+
161
+ Cascade RL aims to combine the benefits of offline RL and online RL to progressively facilitate the post-training of MLLMs in an efficient manner.
162
+ Specifically, we first fine-tune the model using an offline RL algorithm as an efficient warm-up stage to reach satisfactory results, which guarantees high-quality rollouts for the subsequent stage.
163
+ Subsequently, we employ an online RL algorithm to further refine the output distribution based on rollouts generated by the model itself. Compared to a single offline or online RL stage, our cascaded RL achieves significant performance improvements at a fraction of the GPU time cost.
164
+
165
+
166
+
167
+ During the offline RL stage, we employ mixed preference optimization (MPO) to fine-tune the model. Specifically, the training objective of MPO is a combination of preference loss \\(\mathcal{L}_{p}\\), quality loss \\(\mathcal{L}_{q}\\), and generation loss \\(\mathcal{L}_{g}\\), which can be formulated as follows:
168
+
169
+ $$
170
+ \mathcal{L}_{\text{MPO}}=
171
+ w_{p} \mathcal{L}_{p}
172
+ +
173
+ w_{q} \mathcal{L}_{q}
174
+ +
175
+ w_{g} \mathcal{L}_{g}
176
+ ,
177
+ $$
178
+
179
+ where \\(w_{*}\\) represents the weight assigned to each loss component.
180
+ The DPO loss, BCO loss, and LM loss serve as the preference loss, quality loss, and generation loss, respectively.
181
+
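+ For intuition only, a minimal sketch of how the three components could be combined is shown below; the DPO formulation is the standard one, and the weights are placeholder assumptions rather than the values used in training.
+
+ ```python
+ import torch.nn.functional as F
+
+ def dpo_preference_loss(pi_chosen_logp, pi_rejected_logp,
+                         ref_chosen_logp, ref_rejected_logp, beta=0.1):
+     # Standard DPO loss on sequence-level log-probabilities.
+     margin = beta * ((pi_chosen_logp - ref_chosen_logp)
+                      - (pi_rejected_logp - ref_rejected_logp))
+     return -F.logsigmoid(margin).mean()
+
+ def mpo_loss(l_p, l_q, l_g, w_p=0.8, w_q=0.2, w_g=1.0):
+     # Weighted sum of preference (DPO), quality (BCO), and generation (LM) losses.
+     return w_p * l_p + w_q * l_q + w_g * l_g
+ ```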
182
+
183
+ During the online RL stage, we employ GSPO, without reference model constraints, as our online RL algorithm, which we find more effective in training both dense and mixture-of-experts (MoE) models. Similar to GRPO, the advantage is defined as the normalized reward across responses sampled from the same query.
184
+ The training objective of GSPO is given by:
185
+
186
+ $$
187
+ \mathcal{L}_{\mathrm{GSPO}}(\theta)=\mathbb{E}_{x \sim \mathcal{D},\left\{y_i\right\}_{i=1}^G \sim \pi_{\theta_\text{old}}(\cdot \mid x)}\left[\frac{1}{G} \sum_{i=1}^G \min \left(s_i(\theta) \widehat{A}_i, \operatorname{clip}\left(s_i(\theta), 1-\varepsilon, 1+\varepsilon\right) \widehat{A}_i\right)\right],
188
+ $$
189
+
190
+ where the importance sampling ratio is defined as the geometric mean of the per-token ratios.
191
+
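+ A minimal sketch of this objective, assuming per-token log-probabilities are available for each sampled response: the sequence-level ratio \\(s_i(\theta)\\) is the geometric mean of the per-token ratios (equivalently, the exponential of the length-normalized log-ratio), clipped before weighting the advantage. Names and shapes below are assumptions.
+
+ ```python
+ import torch
+
+ def gspo_loss(policy_logps, old_logps, advantages, mask, eps=0.2):
+     """policy_logps, old_logps: [G, L] per-token log-probs of the G sampled
+     responses under the current and old policies; mask: [G, L] marks valid
+     response tokens; advantages: [G] group-normalized rewards."""
+     lengths = mask.sum(dim=1).clamp(min=1)
+     # geometric mean of per-token ratios == exp(mean per-token log-ratio)
+     s = (((policy_logps - old_logps) * mask).sum(dim=1) / lengths).exp()
+     unclipped = s * advantages
+     clipped = s.clamp(1 - eps, 1 + eps) * advantages
+     return -torch.minimum(unclipped, clipped).mean()  # maximize the clipped objective
+ ```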
192
+ > Please see [our paper](https://huggingface.co/papers/2508.18265) for more technical and experimental details.
193
+
194
+
195
+ ### Visual Consistency Learning
196
+
197
+
198
+ We further include ViCO as an additional training stage to integrate the *visual resolution router (ViR)* into InternVL3.5, thereby reducing the inference cost of InternVL3.5. The resulting efficient versions of InternVL3.5 are termed *InternVL3.5-Flash*. In particular, ViCO comprises two stages:
199
+
200
+ `Consistency training`:
201
+ In this stage, the entire model is trained to minimize the divergence between response distributions conditioned on visual tokens with different compression rates.
202
+ In practice, we introduce an extra reference model, which is frozen and initialized with InternVL3.5.
203
+ Given a sample, each image patch is represented as either 256 or 64 tokens, and the training objective is defined as follows:
204
+
205
+
206
+ $$
207
+ \mathcal{L}_\text{ViCO} =
208
+ \mathbb{E}_{\xi \sim \mathcal{R}} \Bigg[
209
+ \frac{1}{N} \sum_{i=1}^{N} \mathrm{KL} \Big(
210
+ \pi_{\theta_{ref}}\left(y_i \mid y_{<i}, I\right) \;\Big\|\;
211
+ \pi_{\theta_{policy}}\left(y_i \mid y_{<i}, I_\xi\right)
212
+ \Big)
213
+ \Bigg],
214
+ $$
215
+
216
+ where \\(\mathrm{KL}\\) denotes the KL divergence and \\(\xi\\) denotes the compression rate, which is uniformly sampled from \\(\{\frac{1}{4},\frac{1}{16}\}\\). The image \\(I_\xi\\) is represented as 256 tokens when \\(\xi=\frac{1}{4}\\) and 64 tokens when \\(\xi=\frac{1}{16}\\). Notably, the reference model always performs inference with \\(\xi=\frac{1}{4}\\).
217
+
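+ Conceptually, the objective above is a token-level KL between the frozen reference (always fed full-resolution tokens) and the policy fed a randomly compressed view; the sketch below assumes both models expose per-token logits and is illustrative only.
+
+ ```python
+ import random
+ import torch.nn.functional as F
+
+ def vico_consistency_loss(ref_logits, policy_logits_by_rate):
+     """ref_logits: [N, V] reference logits computed with xi = 1/4;
+     policy_logits_by_rate: dict mapping a compression rate to the policy's
+     [N, V] logits for the same N response tokens."""
+     xi = random.choice([1 / 4, 1 / 16])              # xi ~ Uniform{1/4, 1/16}
+     pol_logprob = F.log_softmax(policy_logits_by_rate[xi], dim=-1)
+     ref_logprob = F.log_softmax(ref_logits, dim=-1)
+     # KL(pi_ref || pi_policy), averaged over the N response tokens
+     return F.kl_div(pol_logprob, ref_logprob, log_target=True,
+                     reduction='batchmean')
+ ```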
218
+
219
+ `Router training`:
220
+ This stage aims to train the ViR to select an appropriate trade-off resolution for different inputs.
221
+ ViR is formulated as a binary classifier and trained using standard cross-entropy loss.
222
+ To construct the route targets, we first compute the KL divergence between the model outputs conditioned on uncompressed visual tokens (i.e., 256 tokens per patch) and those conditioned on compressed visual tokens (i.e., 64 tokens per patch).
223
+ During this stage, the main MLLM (ViT, MLP and LLM) is kept frozen, and only the ViR is trained.
224
+ Specifically, we first compute the loss ratio for each patch:
225
+
226
+ $$
227
+ r_i = \frac{\mathcal{L}_\text{ViCO}\big(y_i \mid I_{\frac{1}{16}}\big)}{\mathcal{L}_\text{ViCO}\big(y_i \mid I_{\frac{1}{4}}\big)},
228
+ $$
229
+
230
+ which quantifies the relative increase in loss caused by compressing the visual tokens. Based on this ratio, the binary ground-truth label for the patch router is defined as:
231
+
232
+ $$
233
+ y_i^\text{router} =
234
+ \begin{cases}
235
+ 0, & r_i < \tau \; \text{(compression has negligible impact)} \\
236
+ 1, & r_i \ge \tau \; \text{(compression has significant impact)},
237
+ \end{cases}
238
+ $$
239
+
240
+ where \\(y_i^{\text{router}}=0\\) and \\(y_i^{\text{router}}=1\\) indicate that the compression rate \\(\xi\\) is set to \\(\tfrac{1}{16}\\) and \\(\tfrac{1}{4}\\), respectively.
241
+
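+ A small sketch of the label construction described above, assuming the two per-patch losses have already been computed; the threshold value is a placeholder, not the one used in training.
+
+ ```python
+ def router_targets(loss_compressed, loss_uncompressed, tau=1.2):
+     """loss_compressed / loss_uncompressed: per-patch ViCO losses computed
+     with 64-token (xi = 1/16) and 256-token (xi = 1/4) representations,
+     given as torch tensors of equal shape."""
+     r = loss_compressed / loss_uncompressed
+     # 0 -> compression is negligible (use xi = 1/16); 1 -> keep xi = 1/4
+     return (r >= tau).long()
+ ```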
242
+ > Please see [our paper](https://huggingface.co/papers/2508.18265) for more technical and experimental details.
243
+
244
+
245
+ ### Test-Time Scaling
246
+
247
+
248
+ Test-time scaling (TTS) has been empirically demonstrated as an effective approach to enhance the reasoning capabilities of LLMs and MLLMs, particularly for complex tasks necessitating multi-step inference.
249
+ In this work, we implement a comprehensive test-time scaling approach that simultaneously improves reasoning depth (i.e., deep thinking) and breadth (i.e., parallel thinking).
250
+
251
+ `Deep Thinking`: By activating the Thinking mode, we guide the model to deliberately engage in step-by-step reasoning (i.e., decomposing complex problems into logical steps and validating intermediate conclusions) prior to generating the final answer. This approach systematically improves the logical structure of solutions for complex problems, particularly those requiring multi-step inference, and enhances reasoning depth.
252
+
253
+ `Parallel Thinking`: Following InternVL3, for reasoning tasks, we adopt the Best-of-N (BoN) strategy by employing [VisualPRM-v1.1](https://huggingface.co/OpenGVLab/VisualPRM-8B-v1_1) as the critic model to select the optimal response from multiple reasoning candidates.
254
+ This approach improves reasoning breadth.
255
+
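+ As a rough sketch of this parallel-thinking setup, Best-of-N can be implemented by sampling several responses with `model.chat` and keeping the one a critic model scores highest; `score_with_critic` below is a hypothetical placeholder for VisualPRM-based scoring, not an actual API.
+
+ ```python
+ def best_of_n(model, tokenizer, pixel_values, question, score_with_critic, n=8):
+     """Sample n candidate responses and return the one preferred by the critic.
+     `score_with_critic(question, response) -> float` is a hypothetical
+     stand-in for scoring with a process reward model such as VisualPRM."""
+     gen_cfg = dict(max_new_tokens=1024, do_sample=True, temperature=0.6)
+     candidates = [model.chat(tokenizer, pixel_values, question, gen_cfg)
+                   for _ in range(n)]
+     return max(candidates, key=lambda r: score_with_critic(question, r))
+ ```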
256
+ > Notably, unless otherwise specified, the experimental results reported in our paper are obtained without applying TTS. Thus far, we have only applied TTS to reasoning benchmarks, since we found that the model already exhibits strong perception and understanding capabilities, and initiating TTS yields no significant improvement.
257
+
258
+
259
+ ### Decoupled Vision-Language Deployment
260
+
261
+ In multimodal inference, the vision encoder and language model have distinct computational characteristics. The vision encoder, which transforms images into semantic features, is highly parallelizable and does not rely on long-term history states. In contrast, the language model performs inference in an autoregressive manner, which requires the previous states to compute the next one. This sequential property makes the language part more sensitive to memory bandwidth and latency.
262
+ When MLLMs are deployed online at scale, the vision and language models often block each other, thus incurring additional inference cost. This effect becomes more pronounced with larger vision models or higher-resolution images.
263
+
264
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/DvD.jpg)
265
+
266
+ As shown in the Figure above, we propose decoupled vision-language deployment (DvD) to address this issue by separating vision and language processing, with a particular focus on optimizing the prefilling stage. The vision subsystem batches and processes images to produce compact feature embeddings, which are then transmitted to the language subsystem for fusion with the text context prior to decoding. This separation alleviates blocking and brings multimodal prefilling performance closer to that of pure language models.
267
+ In our system implementation, the ViT and MLP (and ViR for InternVL3.5-Flash) are deployed on the vision server, while the language server executes only the LLM. The communication is unidirectional, transmitting BF16 visual features over TCP, with RDMA optionally employed to achieve higher transmission speed. Vision processing, feature transmission, and language processing are organized into an asynchronous three-stage pipeline, enabling overlapped execution and minimizing pipeline stalls.
268
+
269
+
270
+ DvD increases GPU utilization and processing efficiency on the vision side, while enabling the language server to focus exclusively on the LLM’s prefilling and decoding without being blocked by vision computation. This design leads to improved throughput and responsiveness. Moreover, the architecture supports independent hardware cost optimization for the vision and language modules, and facilitates the seamless integration of new modules without requiring modifications to the language server deployment.
271
+
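+ For illustration only, the decoupling can be pictured as a three-stage producer/consumer pipeline (vision encoding, feature transmission, language prefilling); the queue-based sketch below is a conceptual stand-in for the TCP/RDMA implementation described above, and the stage callables are assumptions.
+
+ ```python
+ import queue
+ import threading
+
+ def run_dvd_pipeline(requests, encode_vision, send_features, prefill_language):
+     """Each stage runs in its own thread so vision encoding, transmission,
+     and language prefilling overlap across consecutive requests."""
+     q_feat, q_sent = queue.Queue(maxsize=4), queue.Queue(maxsize=4)
+
+     def vision_stage():                               # vision server: ViT + MLP (+ ViR)
+         for req in requests:
+             q_feat.put((req, encode_vision(req['images'])))
+         q_feat.put(None)
+
+     def transmit_stage():                             # BF16 features over TCP/RDMA
+         while (item := q_feat.get()) is not None:
+             req, feats = item
+             q_sent.put((req, send_features(feats)))
+         q_sent.put(None)
+
+     def language_stage():                             # language server: prefill + decode
+         while (item := q_sent.get()) is not None:
+             req, feats = item
+             prefill_language(req['prompt'], feats)
+
+     threads = [threading.Thread(target=s)
+                for s in (vision_stage, transmit_stage, language_stage)]
+     for t in threads:
+         t.start()
+     for t in threads:
+         t.join()
+ ```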
272
+
273
+ ## Evaluation on Multimodal Capability
274
+
275
+ ### Multimodal Reasoning and Mathematics
276
+
277
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_reasoning.jpg)
278
+
279
+ ### OCR, Chart, and Document Understanding
280
+
281
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_ocr.jpg)
282
+
283
+ ### Multi-Image Understanding & Real-World Comprehension
284
+
285
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_multi_images.jpg)
286
+
287
+ ### Comprehensive Multimodal Understanding & Multimodal Hallucination Evaluation
288
+
289
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_comprehensive.jpg)
290
+
291
+ ### Visual Grounding
292
+
293
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_grounding.jpg)
294
+
295
+ ### Multimodal Multilingual Understanding
296
+
297
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_multilingual.jpg)
298
+
299
+ ### Video Understanding
300
+
301
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_video.jpg)
302
+
303
+ ### GUI Tasks
304
+
305
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_gui.jpg)
306
+
307
+ ### Embodied Tasks
308
+
309
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_embody.jpg)
310
+
311
+ ### SVG Tasks
312
+
313
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_svg.jpg)
314
+
315
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_svg_gen.jpg)
316
+
317
+ ## Evaluation on Language Capability
318
+
319
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/performance_text.jpg)
320
+
321
+ ## Ablation Study
322
+
323
+ ### Cascade Reinforcement Learning
324
+
325
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/ablation_cascade_rl.jpg)
326
+
327
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/ablation_cascade_rl_table.jpg)
328
+
329
+ ### Decoupled Vision-Language Deployment
330
+
331
+
332
+ ![image/jpg](https://huggingface.co/OpenGVLab/InternVL3_5-241B-A28B/resolve/main/images/ablation_dvd.jpg)
333
+
334
+ ## Quick Start
335
+
336
+ We provide example code to run `InternVL3.5-8B` using `transformers`. Please note that our models with up to 30B parameters can be deployed on a single A100 GPU, while the 38B model requires two A100 GPUs and the 235B model requires eight A100 GPUs.
337
+
338
+ > In most cases, both [LMDeploy](https://github.com/InternLM/lmdeploy) and [vLLM](https://github.com/vllm-project/vllm) can be used for model deployment. However, for InternVL3.5-20B-A4B, we recommend using vLLM since lmdeploy has not yet supported GPT-OSS.
339
+
340
+ > Please use transformers>=4.52.1 to ensure the model works normally. For the 20B version of our model, transformers>=4.55.0 is required.
341
+
342
+ ### Model Loading
343
+
344
+ #### 16-bit (bf16 / fp16)
345
+
346
+ ```python
347
+ import torch
348
+ from transformers import AutoTokenizer, AutoModel
349
+ path = "OpenGVLab/InternVL3_5-8B"
350
+ model = AutoModel.from_pretrained(
351
+ path,
352
+ torch_dtype=torch.bfloat16,
353
+ low_cpu_mem_usage=True,
354
+ use_flash_attn=True,
355
+ trust_remote_code=True).eval().cuda()
356
+ ```
357
+
358
+ #### BNB 8-bit Quantization
359
+
360
+ ```python
361
+ import torch
362
+ from transformers import AutoTokenizer, AutoModel
363
+ path = "OpenGVLab/InternVL3_5-8B"
364
+ model = AutoModel.from_pretrained(
365
+ path,
366
+ torch_dtype=torch.bfloat16,
367
+ load_in_8bit=True,
368
+ low_cpu_mem_usage=True,
369
+ use_flash_attn=True,
370
+ trust_remote_code=True).eval()
371
+ ```
372
+
373
+ #### Multiple GPUs
374
+
375
+ ```python
376
+ import math
377
+ import torch
378
+ from transformers import AutoTokenizer, AutoModel
379
+
380
+ path = "OpenGVLab/InternVL3_5-8B"
381
+ model = AutoModel.from_pretrained(
382
+ path,
383
+ torch_dtype=torch.bfloat16,
384
+ low_cpu_mem_usage=True,
385
+ use_flash_attn=True,
386
+ trust_remote_code=True,
387
+ device_map="auto").eval()
388
+ ```
389
+
390
+ ### Thinking Mode
391
+
392
+ To enable thinking mode, please set the system prompt to our Thinking System Prompt. When enabling Thinking mode, we recommend setting `do_sample=True` and `temperature=0.6` to mitigate undesired repetition.
393
+
394
+ ```python
395
+ R1_SYSTEM_PROMPT = """
396
+ You are an AI assistant that rigorously follows this response protocol:
397
+
398
+ 1. First, conduct a detailed analysis of the question. Consider different angles, potential solutions, and reason through the problem step-by-step. Enclose this entire thinking process within <think> and </think> tags.
399
+
400
+ 2. After the thinking section, provide a clear, concise, and direct answer to the user's question. Separate the answer from the think section with a newline.
401
+
402
+ Ensure that the thinking process is thorough but remains focused on the query. The final answer should be standalone and not reference the thinking section.
403
+ """.strip()
404
+
405
+ model.system_message = R1_SYSTEM_PROMPT
406
+ ```
407
+
408
+ ### Inference with Transformers
409
+
410
+ ```python
411
+ import math
412
+ import numpy as np
413
+ import torch
414
+ import torchvision.transforms as T
415
+ from decord import VideoReader, cpu
416
+ from PIL import Image
417
+ from torchvision.transforms.functional import InterpolationMode
418
+ from transformers import AutoModel, AutoTokenizer
419
+
420
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
421
+ IMAGENET_STD = (0.229, 0.224, 0.225)
422
+
423
+ def build_transform(input_size):
424
+ MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
425
+ transform = T.Compose([
426
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
427
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
428
+ T.ToTensor(),
429
+ T.Normalize(mean=MEAN, std=STD)
430
+ ])
431
+ return transform
432
+
433
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
434
+ best_ratio_diff = float('inf')
435
+ best_ratio = (1, 1)
436
+ area = width * height
437
+ for ratio in target_ratios:
438
+ target_aspect_ratio = ratio[0] / ratio[1]
439
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
440
+ if ratio_diff < best_ratio_diff:
441
+ best_ratio_diff = ratio_diff
442
+ best_ratio = ratio
443
+ elif ratio_diff == best_ratio_diff:
444
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
445
+ best_ratio = ratio
446
+ return best_ratio
447
+
448
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
449
+ orig_width, orig_height = image.size
450
+ aspect_ratio = orig_width / orig_height
451
+
452
+ # calculate the existing image aspect ratio
453
+ target_ratios = set(
454
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
455
+ i * j <= max_num and i * j >= min_num)
456
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
457
+
458
+ # find the closest aspect ratio to the target
459
+ target_aspect_ratio = find_closest_aspect_ratio(
460
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
461
+
462
+ # calculate the target width and height
463
+ target_width = image_size * target_aspect_ratio[0]
464
+ target_height = image_size * target_aspect_ratio[1]
465
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
466
+
467
+ # resize the image
468
+ resized_img = image.resize((target_width, target_height))
469
+ processed_images = []
470
+ for i in range(blocks):
471
+ box = (
472
+ (i % (target_width // image_size)) * image_size,
473
+ (i // (target_width // image_size)) * image_size,
474
+ ((i % (target_width // image_size)) + 1) * image_size,
475
+ ((i // (target_width // image_size)) + 1) * image_size
476
+ )
477
+ # split the image
478
+ split_img = resized_img.crop(box)
479
+ processed_images.append(split_img)
480
+ assert len(processed_images) == blocks
481
+ if use_thumbnail and len(processed_images) != 1:
482
+ thumbnail_img = image.resize((image_size, image_size))
483
+ processed_images.append(thumbnail_img)
484
+ return processed_images
485
+
486
+ def load_image(image_file, input_size=448, max_num=12):
487
+ image = Image.open(image_file).convert('RGB')
488
+ transform = build_transform(input_size=input_size)
489
+ images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
490
+ pixel_values = [transform(image) for image in images]
491
+ pixel_values = torch.stack(pixel_values)
492
+ return pixel_values
493
+
494
+ path = 'OpenGVLab/InternVL3_5-8B'
495
+ model = AutoModel.from_pretrained(
496
+ path,
497
+ torch_dtype=torch.bfloat16,
498
+ load_in_8bit=False,
499
+ low_cpu_mem_usage=True,
500
+ use_flash_attn=True,
501
+ trust_remote_code=True,
502
+ device_map="auto").eval()
503
+ tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
504
+
505
+ # set the max number of tiles in `max_num`
506
+ pixel_values = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
507
+ generation_config = dict(max_new_tokens=1024, do_sample=True)
508
+
509
+ # pure-text conversation (纯文本对话)
510
+ question = 'Hello, who are you?'
511
+ response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
512
+ print(f'User: {question}\nAssistant: {response}')
513
+
514
+ question = 'Can you tell me a story?'
515
+ response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True)
516
+ print(f'User: {question}\nAssistant: {response}')
517
+
518
+ # single-image single-round conversation (单图单轮对话)
519
+ question = '<image>\nPlease describe the image shortly.'
520
+ response = model.chat(tokenizer, pixel_values, question, generation_config)
521
+ print(f'User: {question}\nAssistant: {response}')
522
+
523
+ # single-image multi-round conversation (单图多轮对话)
524
+ question = '<image>\nPlease describe the image in detail.'
525
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
526
+ print(f'User: {question}\nAssistant: {response}')
527
+
528
+ question = 'Please write a poem according to the image.'
529
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
530
+ print(f'User: {question}\nAssistant: {response}')
531
+
532
+ # multi-image multi-round conversation, combined images (多图多轮对话,拼接图像)
533
+ pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
534
+ pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
535
+ pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
536
+
537
+ question = '<image>\nDescribe the two images in detail.'
538
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
539
+ history=None, return_history=True)
540
+ print(f'User: {question}\nAssistant: {response}')
541
+
542
+ question = 'What are the similarities and differences between these two images.'
543
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
544
+ history=history, return_history=True)
545
+ print(f'User: {question}\nAssistant: {response}')
546
+
547
+ # multi-image multi-round conversation, separate images (多图多轮对话,独立图像)
548
+ pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
549
+ pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
550
+ pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
551
+ num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
552
+
553
+ question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.'
554
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
555
+ num_patches_list=num_patches_list,
556
+ history=None, return_history=True)
557
+ print(f'User: {question}\nAssistant: {response}')
558
+
559
+ question = 'What are the similarities and differences between these two images.'
560
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
561
+ num_patches_list=num_patches_list,
562
+ history=history, return_history=True)
563
+ print(f'User: {question}\nAssistant: {response}')
564
+
565
+ # batch inference, single image per sample (单图批处理)
566
+ pixel_values1 = load_image('./examples/image1.jpg', max_num=12).to(torch.bfloat16).cuda()
567
+ pixel_values2 = load_image('./examples/image2.jpg', max_num=12).to(torch.bfloat16).cuda()
568
+ num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
569
+ pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
570
+
571
+ questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list)
572
+ responses = model.batch_chat(tokenizer, pixel_values,
573
+ num_patches_list=num_patches_list,
574
+ questions=questions,
575
+ generation_config=generation_config)
576
+ for question, response in zip(questions, responses):
577
+ print(f'User: {question}\nAssistant: {response}')
578
+
579
+ # video multi-round conversation (视频多轮对话)
580
+ def get_index(bound, fps, max_frame, first_idx=0, num_segments=32):
581
+ if bound:
582
+ start, end = bound[0], bound[1]
583
+ else:
584
+ start, end = -100000, 100000
585
+ start_idx = max(first_idx, round(start * fps))
586
+ end_idx = min(round(end * fps), max_frame)
587
+ seg_size = float(end_idx - start_idx) / num_segments
588
+ frame_indices = np.array([
589
+ int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
590
+ for idx in range(num_segments)
591
+ ])
592
+ return frame_indices
593
+
594
+ def load_video(video_path, bound=None, input_size=448, max_num=1, num_segments=32):
595
+ vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
596
+ max_frame = len(vr) - 1
597
+ fps = float(vr.get_avg_fps())
598
+
599
+ pixel_values_list, num_patches_list = [], []
600
+ transform = build_transform(input_size=input_size)
601
+ frame_indices = get_index(bound, fps, max_frame, first_idx=0, num_segments=num_segments)
602
+ for frame_index in frame_indices:
603
+ img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
604
+ img = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
605
+ pixel_values = [transform(tile) for tile in img]
606
+ pixel_values = torch.stack(pixel_values)
607
+ num_patches_list.append(pixel_values.shape[0])
608
+ pixel_values_list.append(pixel_values)
609
+ pixel_values = torch.cat(pixel_values_list)
610
+ return pixel_values, num_patches_list
611
+
612
+ video_path = './examples/red-panda.mp4'
613
+ pixel_values, num_patches_list = load_video(video_path, num_segments=8, max_num=1)
614
+ pixel_values = pixel_values.to(torch.bfloat16).cuda()
615
+ video_prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
616
+ question = video_prefix + 'What is the red panda doing?'
617
+ # Frame1: <image>\nFrame2: <image>\n...\nFrame8: <image>\n{question}
618
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
619
+ num_patches_list=num_patches_list, history=None, return_history=True)
620
+ print(f'User: {question}\nAssistant: {response}')
621
+
622
+ question = 'Describe this video in detail.'
623
+ response, history = model.chat(tokenizer, pixel_values, question, generation_config,
624
+ num_patches_list=num_patches_list, history=history, return_history=True)
625
+ print(f'User: {question}\nAssistant: {response}')
626
+ ```
627
+
628
+ #### Streaming Output
629
+
630
+ Besides the method above, you can also use the following code to obtain streamed output.
631
+
632
+ ```python
633
+ from transformers import TextIteratorStreamer
634
+ from threading import Thread
635
+
636
+ # Initialize the streamer
637
+ streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10)
638
+ # Define the generation configuration
639
+ generation_config = dict(max_new_tokens=1024, do_sample=False, streamer=streamer)
640
+ # Start the model chat in a separate thread
641
+ thread = Thread(target=model.chat, kwargs=dict(
642
+ tokenizer=tokenizer, pixel_values=pixel_values, question=question,
643
+ history=None, return_history=False, generation_config=generation_config,
644
+ ))
645
+ thread.start()
646
+
647
+ # Initialize an empty string to store the generated text
648
+ generated_text = ''
649
+ # Loop through the streamer to get the new text as it is generated
650
+ for new_text in streamer:
651
+ if new_text == model.conv_template.sep:
652
+ break
653
+ generated_text += new_text
654
+ print(new_text, end='', flush=True) # Print each new chunk of generated text on the same line
655
+ ```
656
+
657
+ ## Finetune
658
+
659
+ Many repositories now support fine-tuning of the InternVL series models, including [InternVL](https://github.com/OpenGVLab/InternVL), [SWIFT](https://github.com/modelscope/ms-swift), [XTuner](https://github.com/InternLM/xtuner), and others. Please refer to their documentation for more details on fine-tuning.
660
+
661
+ ## Deployment
662
+
663
+ ### LMDeploy
664
+
665
+ LMDeploy is a toolkit for compressing, deploying, and serving LLMs & VLMs.
666
+
667
+ ```sh
668
+ pip install 'lmdeploy>=0.9.1'
669
+ ```
670
+
671
+ LMDeploy abstracts the complex inference process of multi-modal Vision-Language Models (VLM) into an easy-to-use pipeline, similar to the Large Language Model (LLM) inference pipeline.
672
+
673
+ #### A 'Hello, world' Example
674
+
675
+ ```python
676
+ from lmdeploy import pipeline, PytorchEngineConfig
677
+ from lmdeploy.vl import load_image
678
+
679
+ image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
680
+
681
+ # Please set tp=2 for the 38B version and tp=8 for the 241B-A28B version.
682
+ model = 'OpenGVLab/InternVL3_5-8B'
683
+ pipe = pipeline(model, backend_config=PytorchEngineConfig(session_len=32768, tp=1))
684
+
685
+ response = pipe(('describe this image', image))
686
+ print(response.text)
687
+ ```
688
+
689
+ #### Multi-images Inference
690
+
691
+ When dealing with multiple images, you can put them all in one list. Keep in mind that multiple images will lead to a higher number of input tokens, and as a result, the size of the context window typically needs to be increased.
692
+
693
+ ```python
694
+ from lmdeploy import pipeline, PytorchEngineConfig
695
+ from lmdeploy.vl import load_image
696
+ from lmdeploy.vl.constants import IMAGE_TOKEN
697
+
698
+ # Please set tp=2 for the 38B version and tp=8 for the 241B-A28B version.
699
+ model = 'OpenGVLab/InternVL3_5-8B'
700
+ pipe = pipeline(model, backend_config=PytorchEngineConfig(session_len=32768, tp=1))
701
+
702
+ image_urls=[
703
+ 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg',
704
+ 'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg'
705
+ ]
706
+
707
+ images = [load_image(img_url) for img_url in image_urls]
708
+ # Numbering images improves multi-image conversations
709
+ response = pipe((f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images', images))
710
+ print(response.text)
711
+ ```
712
+
713
+ #### Batch Prompts Inference
714
+
715
+ Conducting inference with batch prompts is quite straightforward; just place them within a list structure:
716
+
717
+ ```python
718
+ from lmdeploy import pipeline, PytorchEngineConfig
719
+ from lmdeploy.vl import load_image
720
+
721
+ # Please set tp=2 for the 38B version and tp=8 for the 241B-A28B version.
722
+ model = 'OpenGVLab/InternVL3_5-8B'
723
+ pipe = pipeline(model, backend_config=PytorchEngineConfig(session_len=32768, tp=1))
724
+
725
+ image_urls=[
726
+ "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg",
727
+ "https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/det.jpg"
728
+ ]
729
+ prompts = [('describe this image', load_image(img_url)) for img_url in image_urls]
730
+ response = pipe(prompts)
731
+ print(response)
732
+ ```
733
+
734
+ #### Multi-turn Conversation
735
+
736
+ There are two ways to conduct multi-turn conversations with the pipeline. One is to construct messages in the OpenAI format and use the method introduced above; the other is to use the `pipeline.chat` interface.
737
+
738
+ ```python
739
+ from lmdeploy import pipeline, PytorchEngineConfig, GenerationConfig
740
+ from lmdeploy.vl import load_image
741
+
742
+ # Please set tp=2 for the 38B version and tp=8 for the 241B-A28B version.
743
+ model = 'OpenGVLab/InternVL3_5-8B'
744
+ pipe = pipeline(model, backend_config=PytorchEngineConfig(session_len=32768, tp=1))
745
+
746
+ image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/demo/resources/human-pose.jpg')
747
+ gen_config = GenerationConfig(top_k=50, top_p=0.95, temperature=0.6, max_new_tokens=8192)
748
+ sess = pipe.chat(('describe this image', image), gen_config=gen_config)
749
+ print(sess.response.text)
750
+ sess = pipe.chat('What is the woman doing?', session=sess, gen_config=gen_config)
751
+ print(sess.response.text)
752
+ ```
753
+
754
+ #### Service
755
+
756
+ LMDeploy's `api_server` enables models to be easily packed into services with a single command. The provided RESTful APIs are compatible with OpenAI's interfaces. Below is an example of service startup:
757
+
758
+ ```shell
759
+ lmdeploy serve api_server OpenGVLab/InternVL3_5-8B --server-port 23333 --tp 1 --backend pytorch
760
+ ```
761
+
762
+ To use the OpenAI-style interface, you need to install OpenAI:
763
+
764
+ ```shell
765
+ pip install openai
766
+ ```
767
+
768
+ Then, use the code below to make the API call:
769
+
770
+ ```python
771
+ from openai import OpenAI
772
+
773
+ client = OpenAI(api_key='YOUR_API_KEY', base_url='http://0.0.0.0:23333/v1')
774
+ model_name = client.models.list().data[0].id
775
+ response = client.chat.completions.create(
776
+ model=model_name,
777
+ messages=[{
778
+ 'role':
779
+ 'user',
780
+ 'content': [{
781
+ 'type': 'text',
782
+ 'text': 'describe this image',
783
+ }, {
784
+ 'type': 'image_url',
785
+ 'image_url': {
786
+ 'url':
787
+ 'https://modelscope.oss-cn-beijing.aliyuncs.com/resource/tiger.jpeg',
788
+ },
789
+ }],
790
+ }],
791
+ temperature=0.8,
792
+ top_p=0.8)
793
+ print(response)
794
+ ```
795
+
796
+ ## License
797
+
798
+ This project is released under the Apache-2.0 license. It uses the pre-trained Qwen3 as a component, which is also licensed under the Apache-2.0 license.
799
+
800
+ ## Citation
801
+
802
+ If you find this project useful in your research, please consider citing:
803
+
804
+ ```BibTeX
805
+ @article{chen2024expanding,
806
+ title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
807
+ author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
808
+ journal={arXiv preprint arXiv:2412.05271},
809
+ year={2024}
810
+ }
811
+ @article{wang2024mpo,
812
+ title={Enhancing the Reasoning Ability of Multimodal Large Language Models via Mixed Preference Optimization},
813
+ author={Wang, Weiyun and Chen, Zhe and Wang, Wenhai and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Zhu, Jinguo and Zhu, Xizhou and Lu, Lewei and Qiao, Yu and Dai, Jifeng},
814
+ journal={arXiv preprint arXiv:2411.10442},
815
+ year={2024}
816
+ }
817
+ @article{chen2024far,
818
+ title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
819
+ author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
820
+ journal={arXiv preprint arXiv:2404.16821},
821
+ year={2024}
822
+ }
823
+ @inproceedings{chen2024internvl,
824
+ title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
825
+ author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
826
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
827
+ pages={24185--24198},
828
+ year={2024}
829
+ }
830
+ ```