yongqiang committed
Commit 56ed422 · 0 Parent(s)

Initialize this repo
This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +40 -0
  2. README.md +150 -0
  3. config.json +0 -0
  4. examples/image_0.jpg +3 -0
  5. examples/image_1.jpg +3 -0
  6. examples/image_2.png +3 -0
  7. examples/image_3.png +3 -0
  8. examples/red-panda.mp4 +3 -0
  9. infer.py +370 -0
  10. internvl3_2b_axmodel/model.embed_tokens.weight.npy +3 -0
  11. internvl3_2b_axmodel/qwen2_p128_l0_together.axmodel +3 -0
  12. internvl3_2b_axmodel/qwen2_p128_l10_together.axmodel +3 -0
  13. internvl3_2b_axmodel/qwen2_p128_l11_together.axmodel +3 -0
  14. internvl3_2b_axmodel/qwen2_p128_l12_together.axmodel +3 -0
  15. internvl3_2b_axmodel/qwen2_p128_l13_together.axmodel +3 -0
  16. internvl3_2b_axmodel/qwen2_p128_l14_together.axmodel +3 -0
  17. internvl3_2b_axmodel/qwen2_p128_l15_together.axmodel +3 -0
  18. internvl3_2b_axmodel/qwen2_p128_l16_together.axmodel +3 -0
  19. internvl3_2b_axmodel/qwen2_p128_l17_together.axmodel +3 -0
  20. internvl3_2b_axmodel/qwen2_p128_l18_together.axmodel +3 -0
  21. internvl3_2b_axmodel/qwen2_p128_l19_together.axmodel +3 -0
  22. internvl3_2b_axmodel/qwen2_p128_l1_together.axmodel +3 -0
  23. internvl3_2b_axmodel/qwen2_p128_l20_together.axmodel +3 -0
  24. internvl3_2b_axmodel/qwen2_p128_l21_together.axmodel +3 -0
  25. internvl3_2b_axmodel/qwen2_p128_l22_together.axmodel +3 -0
  26. internvl3_2b_axmodel/qwen2_p128_l23_together.axmodel +3 -0
  27. internvl3_2b_axmodel/qwen2_p128_l24_together.axmodel +3 -0
  28. internvl3_2b_axmodel/qwen2_p128_l25_together.axmodel +3 -0
  29. internvl3_2b_axmodel/qwen2_p128_l26_together.axmodel +3 -0
  30. internvl3_2b_axmodel/qwen2_p128_l27_together.axmodel +3 -0
  31. internvl3_2b_axmodel/qwen2_p128_l2_together.axmodel +3 -0
  32. internvl3_2b_axmodel/qwen2_p128_l3_together.axmodel +3 -0
  33. internvl3_2b_axmodel/qwen2_p128_l4_together.axmodel +3 -0
  34. internvl3_2b_axmodel/qwen2_p128_l5_together.axmodel +3 -0
  35. internvl3_2b_axmodel/qwen2_p128_l6_together.axmodel +3 -0
  36. internvl3_2b_axmodel/qwen2_p128_l7_together.axmodel +3 -0
  37. internvl3_2b_axmodel/qwen2_p128_l8_together.axmodel +3 -0
  38. internvl3_2b_axmodel/qwen2_p128_l9_together.axmodel +3 -0
  39. internvl3_2b_axmodel/qwen2_post.axmodel +3 -0
  40. internvl3_2b_tokenizer/added_tokens.json +33 -0
  41. internvl3_2b_tokenizer/config.json +224 -0
  42. internvl3_2b_tokenizer/configuration_intern_vit.py +120 -0
  43. internvl3_2b_tokenizer/configuration_internvl_chat.py +97 -0
  44. internvl3_2b_tokenizer/conversation.py +391 -0
  45. internvl3_2b_tokenizer/generation_config.json +4 -0
  46. internvl3_2b_tokenizer/merges.txt +0 -0
  47. internvl3_2b_tokenizer/modeling_intern_vit.py +431 -0
  48. internvl3_2b_tokenizer/modeling_internvl_chat.py +359 -0
  49. internvl3_2b_tokenizer/preprocessor_config.json +19 -0
  50. internvl3_2b_tokenizer/special_tokens_map.json +31 -0
.gitattributes ADDED
@@ -0,0 +1,40 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.axmodel filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
examples/red-panda.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,150 @@
---
license: mit
language:
- en
- zh
base_model:
- OpenGVLab/InternVL3-2B
pipeline_tag: visual-question-answering
tags:
- OpenGVLab
- InternVL3-2B
---

# InternVL3-2B-Int8

This version of InternVL3-2B has been converted to run on the Axera NPU using **w8a16** quantization.

Compatible with Pulsar2 version: 3.4

## Convert tools links:

For those who are interested in model conversion, you can try to export the axmodel from the original repo:
https://huggingface.co/OpenGVLab/InternVL3-2B

- [Github for InternVL3-2B.axera](https://github.com/AXERA-TECH/InternVL3-2B.axera)
- [Pulsar2 Link, How to Convert LLM from Huggingface to axmodel](https://pulsar2-docs.readthedocs.io/en/latest/appendix/build_llm.html)

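For reference, the exact Pulsar2 build command used for this conversion is recorded in the docstring of `infer.py` in this repo:

```bash
pulsar2 llm_build \
    --input_path ./InternVL3-2B \
    --output_path ./InternVL3-2B_axmodel \
    --hidden_state_type bf16 \
    --prefill_len 128 \
    --last_kv_cache_len 128 \
    --last_kv_cache_len 256 \
    --last_kv_cache_len 384 \
    --last_kv_cache_len 512 \
    --last_kv_cache_len 640 \
    --last_kv_cache_len 768 \
    --last_kv_cache_len 896 \
    --last_kv_cache_len 1024 \
    --kv_cache_len 2559 \
    --chip AX650 -c 1 --parallel 28
```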
## Support Platform

- AX650
- [M4N-Dock(爱芯派Pro)](https://wiki.sipeed.com/hardware/zh/maixIV/m4ndock/m4ndock.html)

| Chip | Image num | Image encoder (448px) | TTFT | w8a16 decode |
|--|--|--|--|--|
| AX650 | 1 | 364 ms | 503 ms (320 tokens) | 11.50 tokens/sec |
| AX650 | 4 | 1456 ms | 4589 ms (1152 tokens) | 11.50 tokens/sec |

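TTFT (time to first token) above appears to be measured for the prompt length shown in parentheses; once decoding starts, 11.50 tokens/sec corresponds to roughly 87 ms per generated token (1 / 11.50 ≈ 0.087 s).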
## How to use

Download all files from this repository to the device.

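As a minimal sketch of one way to do that (assuming `huggingface-cli` from the `huggingface_hub` package is available on the host; `<repo-id>` is a placeholder for this repository's id on Hugging Face):

```bash
# Hypothetical download command; replace <repo-id> with this repo's id.
pip install -U huggingface_hub
huggingface-cli download <repo-id> --local-dir ./InternVL3-2B
```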
**Using AX650 Board**

```bash
root@ax650 ~/yongqiang/push_hugging_face/InternVL3-2B # tree -L 1
.
├── config.json
├── examples
├── infer.py
├── internvl3_2b_axmodel
├── internvl3_2b_tokenizer
├── README.md
└── vit_axmodel

4 directories, 3 files
```

#### Inference with AX650 Host, such as M4N-Dock(爱芯派Pro) or AX650N DEMO Board

**Text Generation**

input text:

```
Please calculate the derivative of the function [y=2x^2-2] and provide the reasoning process in markdown format.
```

log information:

```bash
root@ax650 ~/yongqiang/push_hugging_face/InternVL3-2B # python3 infer.py --hf_model internvl3_2b_tokenizer/ --axmodel_path internvl3_2b_axmodel/ --question "Please calculate the derivative of the function [y=2x^2-2] and provide the reasoning process in markdown format"
Init InferenceSession: 100%|██████████████████████████████████████████████████████████| 28/28 [00:16<00:00,  1.74it/s]
model load done!
prefill token_len: 85
slice_indexs is [0]
slice prefill done 0
Decode:   9%|██████▎   | 232/2559 [00:19<05:14,  7.39it/s]
Decode:  17%|████████████ | 440/2559 [00:48<04:51,  7.26it/s]hit eos!
Decode:  17%|████████████ | 440/2559 [00:48<03:53,  9.06it/s]
Certainly! Let's calculate the derivative of the function \( y = 2x^2 - 2 \) using the rules of differentiation.

### Step-by-Step Reasoning:

1. **Identify the Function:**
   The given function is \( y = 2x^2 - 2 \).

2. **Differentiate Term by Term:**
   We will differentiate each term of the function separately.

   - **First Term: \( 2x^2 \)**
     - The derivative of \( x^n \) (where n is a constant) is \( nx^{n-1} \).
     - Here, \( n = 2 \).
     - Therefore, the derivative of \( 2x^2 \) is \( 2 \) times \( 2x^{2-1} \), which simplifies to \( 4x \).

   - **Second Term: \( -2 \)**
     - The derivative of a constant (a term without \( x \)) is 0.
     - Therefore, the derivative of \( -2 \) is \( 0 \).

3. **Combine the Derivatives:**
   - The derivative of the entire function is the sum of the derivatives of each term.
   - So, the derivative of \( y = 2x^2 - 2 \) is \( 4x + 0 \), which simplifies to \( 4x \).

### Final Answer:
The derivative of the function \( y = 2x^2 - 2 \) is \( 4x \).

### Summary:
The derivative of \( y = 2x^2 - 2 \) is \( 4x \).
```
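As a quick sanity check on the log above: \( \frac{d}{dx}\bigl(2x^2 - 2\bigr) = 4x \), which matches the model's answer.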

**Multimodal Understanding**

- input image

![](examples/image_1.jpg)

input text:

```
"Please describe this picture in detail."
```

log information:

```bash
root@ax650 ~/yongqiang/push_hugging_face/InternVL3-2B # python3 infer.py --hf_model internvl3_2b_tokenizer/ --axmodel_path internvl3_2b_axmodel/ --question "Please describe this picture in detail" -i examples/image_1.jpg --vit_model vit_axmodel/internvl3_2b_vit_slim.axmodel
[INFO] Available providers:  ['AxEngineExecutionProvider']
Init InferenceSession:   0%|          | 0/24 [00:00<?, ?it/s][INFO] Chip type: ChipType.MC50
[INFO] VNPU type: VNPUType.DISABLED
[INFO] Engine version: 2.11.0a
Init InferenceSession: 100%|██████████████████████████████████████████████████████████| 28/28 [00:14<00:00,  1.92it/s]
model load done!
prefill token_len: 325
slice_indexs is [0, 1, 2]
slice prefill done 0
slice prefill done 1
slice prefill done 2
Decode:  13%|████████▋ | 326/2559 [00:00<00:01, 1829.15it/s]
Decode:  19%|█████████████▍ | 489/2559 [00:22<02:26, 14.17it/s]hit eos!
Decode:  20%|██████████████▏ | 517/2559 [00:26<01:43, 19.71it/s]
**Image Description:**

The image depicts a giant panda in a naturalistic enclosure, likely within a zoo or wildlife sanctuary. The panda is prominently positioned in the foreground, surrounded by lush green bamboo plants. Its distinctive black and white fur is clearly visible, with the panda's face, ears, and limbs being black, while its body and the rest of its face are white. The panda appears to be eating bamboo, with its front paws holding a piece of bamboo close to its mouth. The panda's expression is calm and curious, with its eyes looking directly at the camera.

In the background, there is another panda partially obscured by the foliage and a wooden structure, possibly part of the enclosure's design. The ground is covered with a layer of mulch or wood chips, providing a naturalistic habitat for the pandas. The overall setting is serene and well-maintained, designed to mimic the panda's natural habitat while ensuring the animals' well-being.
```
config.json ADDED
File without changes
examples/image_0.jpg ADDED

Git LFS Details

  • SHA256: c587294b3bf637dacbb3c96324c127187a2f242c94f639633a0d8a2775a9a399
  • Pointer size: 130 Bytes
  • Size of remote file: 78.1 kB
examples/image_1.jpg ADDED

Git LFS Details

  • SHA256: 08487494b8dc08d44bc36491adf3ab89ff30d13a3122da86f3cd67cad89eeee8
  • Pointer size: 131 Bytes
  • Size of remote file: 126 kB
examples/image_2.png ADDED

Git LFS Details

  • SHA256: 622ae2d01ff4467fa69a7888728d776650117a0f4887e96ba0fb9a8a6d77b3c3
  • Pointer size: 131 Bytes
  • Size of remote file: 355 kB
examples/image_3.png ADDED

Git LFS Details

  • SHA256: 729e80e77d8611778859d2f232cb7f2a8fda04ed67dd8dcc3e7cd7a657367402
  • Pointer size: 131 Bytes
  • Size of remote file: 394 kB
examples/red-panda.mp4 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d921c07bb97224d65a37801541d246067f0d506f08723ffa1ad85c217907ccb8
size 1867237
infer.py ADDED
@@ -0,0 +1,370 @@
# import llm_utils
import dataclasses
import json
from transformers import AutoTokenizer, AutoConfig
import torch
from torchvision.transforms.functional import InterpolationMode
import numpy as np
from ml_dtypes import bfloat16
from axengine import InferenceSession
from tqdm import tqdm
import torchvision.transforms as T
from PIL import Image
import argparse


"""
pulsar2 llm_build \
    --input_path ./InternVL3-2B \
    --output_path ./InternVL3-2B_axmodel \
    --hidden_state_type bf16 \
    --prefill_len 128 \
    --last_kv_cache_len 128 \
    --last_kv_cache_len 256 \
    --last_kv_cache_len 384 \
    --last_kv_cache_len 512 \
    --last_kv_cache_len 640 \
    --last_kv_cache_len 768 \
    --last_kv_cache_len 896 \
    --last_kv_cache_len 1024 \
    --kv_cache_len 2559 \
    --chip AX650 -c 1 --parallel 28

Supports up to 4 input images; plain text chat is also supported.
"""

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio


def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def load_image(image_file, input_size=448, max_num=12):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values


def post_process(data, topk=1, topp=0.9, temperature=0.6):
    # NOTE: topk defaults to 1, so decoding is effectively greedy despite
    # the top-p/temperature machinery below.
    def top_p(l: np.ndarray, p: float) -> np.ndarray:
        index = np.argsort(l)
        res = l.copy()
        sum_p = 0
        for i in index[::-1]:
            if sum_p >= p:
                res[i] = 0
            sum_p += res[i]
        return res / sum_p

    def softmax(l: np.ndarray) -> np.ndarray:
        l_max = l - l.max()
        l_exp = np.exp(l_max)
        res = l_exp / np.sum(l_exp)
        return res.astype(np.float64)

    r = data.astype(np.float32)
    r = r.flatten()
    # topk
    candidate_index = np.argpartition(r, -topk)[-topk:]
    candidate_value = r[candidate_index]
    # temperature
    candidate_value /= temperature
    # softmax
    candidate_soft = softmax(candidate_value)
    # topp
    candidate_soft = top_p(candidate_soft, topp)
    candidate_soft = candidate_soft.astype(np.float64) / candidate_soft.sum()
    pos = np.random.multinomial(1, candidate_soft).argmax()
    next_token = candidate_index[pos]
    return next_token, candidate_index, candidate_soft


if __name__ == "__main__":

    prompt = None
    parser = argparse.ArgumentParser(description="Model configuration parameters")
    parser.add_argument("--hf_model", type=str, default="./InternVL3-2B",
                        help="Path to HuggingFace model")
    parser.add_argument("--axmodel_path", type=str, default="./InternVL3-2B_axmodel",
                        help="Path to the compiled axmodel of the LLM")
    parser.add_argument("--vit_model", type=str, default=None,
                        help="Path to the compiled axmodel of the ViT encoder")
    parser.add_argument("-i", "--images", nargs='+', type=str, default=None,
                        help="Path to the test image(s).")
    parser.add_argument("-q", "--question", type=str, default="Please calculate the derivative of the function y=2x^2.",
                        help="Your question that you want to ask the model.")
    args = parser.parse_args()

    hf_model_path = args.hf_model
    axmodel_path = args.axmodel_path
    vit_axmodel_path = args.vit_model
    test_imgs_path = args.images

    config = AutoConfig.from_pretrained(hf_model_path, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(hf_model_path, trust_remote_code=True, use_fast=False)
    # set the max number of tiles in `max_num`
    pixel_values_list = []
    if test_imgs_path is not None:
        for img_path in test_imgs_path:
            pixel_values = load_image(img_path, input_size=448, max_num=1)
            pixel_values_list.append(pixel_values)
        print(f"number of input images: {len(pixel_values_list)}")
        print("preprocess image done!")

        # extract img feature by vit
        vit_session = InferenceSession(vit_axmodel_path)
        vit_output_list = []
        for idx, pixel_values in enumerate(pixel_values_list):
            vit_output = vit_session.run(None, {"image": pixel_values.numpy()})[0]
            vit_output_list.append(vit_output.copy())  # copy so the vit outputs do not share the same buffer

        print(f"vit_output.shape is {vit_output_list[0].shape}, vit feature extract done!")

    prompt = "<|im_start|>system\n你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型, 英文名叫 InternVL3, 是一个有用无害的人工智能助手, 擅长思考和回答用户的问题.<|im_end|>\n"
    question = args.question
    prompt += "<|im_start|>user\n" + question

    if len(pixel_values_list) > 0:
        for idx in range(len(pixel_values_list)):
            prompt += "\n<img>" + "<IMG_CONTEXT>" * 256 + "</img>\n"

    prompt += "<|im_end|>\n<|im_start|>assistant"
    print(f"prompt is {prompt}")
    token_ids = tokenizer.encode(prompt)

    # image understanding: splice the vit features into the text embeddings
    image_start_indices = np.where(np.array(token_ids) == 151665)[0].tolist()  # <img> tag
    embeds = np.load(f"{axmodel_path}/model.embed_tokens.weight.npy")
    prefill_data = np.take(embeds, token_ids, axis=0)
    prefill_data = prefill_data.astype(bfloat16)
    token_len = len(token_ids)

    assert token_len < 1024 + 128, f"input prompt ({token_len} tokens) exceeds the maximum length!"
    for idx, image_start_index in enumerate(image_start_indices):
        image_insert_index = image_start_index + 1
        prefill_data[image_insert_index : image_insert_index + 256] = vit_output_list[idx][0, :, :]
    ##################################

    lastN = 2559
    cfg = config.llm_config
    # cfg = config
    # cfg.num_hidden_layers = 24

    kv_dim = cfg.hidden_size // cfg.num_attention_heads * cfg.num_key_value_heads
    k_caches = [
        np.zeros((1, lastN, kv_dim), dtype=bfloat16)
        for _ in range(cfg.num_hidden_layers)
    ]
    v_caches = [
        np.zeros((1, lastN, kv_dim), dtype=bfloat16)
        for _ in range(cfg.num_hidden_layers)
    ]

    prefill_decoder_sessions = []
    for i in tqdm(range(cfg.num_hidden_layers), desc="Init InferenceSession"):
        session = InferenceSession(
            f"{axmodel_path}/qwen2_p128_l{i}_together.axmodel"
        )
        prefill_decoder_sessions.append(session)

    post_process_session = InferenceSession(
        f"{axmodel_path}/qwen2_post.axmodel"
    )
    print("model load done!")
    print("prefill token_len: ", token_len)

    """
    prefill
    """
    # The prompt is prefilled in fixed 128-token slices; each slice attends
    # to the K/V cache built by the earlier slices.
    prefill_slice_len = 128
    # slice_indexs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    slice_indexs = [
        e for e in range(token_len // prefill_slice_len + 1)
    ]
    print(f"slice_indexs is {slice_indexs}")
    prefill_len = prefill_slice_len * slice_indexs[-1] if slice_indexs[-1] != 0 else prefill_slice_len  # 128 here is prefill_slice_len

    if prefill_len > 0:
        for slice_index in slice_indexs:
            indices = np.array(
                list(
                    range(
                        slice_index * prefill_slice_len,
                        (slice_index + 1) * prefill_slice_len,
                    )
                ),
                np.uint32,
            ).reshape((1, prefill_slice_len))

            mask = (
                np.zeros((1, prefill_slice_len, prefill_slice_len * (slice_index + 1)))
                - 65536
            )
            data = np.zeros((1, prefill_slice_len, cfg.hidden_size)).astype(bfloat16)
            for i, t in enumerate(
                range(
                    slice_index * prefill_slice_len,
                    (slice_index + 1) * prefill_slice_len,
                )
            ):
                if t < len(token_ids):
                    mask[:, i, : slice_index * prefill_slice_len + i + 1] = 0
                    data[:, i : i + 1, :] = (
                        prefill_data[t]
                        .reshape((1, 1, cfg.hidden_size))
                        .astype(bfloat16)
                    )

            if slice_index == slice_indexs[-1]:
                remain_len = token_len - slice_index * prefill_slice_len
            else:
                remain_len = prefill_slice_len
            mask = mask.astype(bfloat16)
            for i in range(cfg.num_hidden_layers):
                input_feed = {
                    "K_cache": (
                        k_caches[i][:, 0 : prefill_slice_len * slice_index, :]
                        if slice_index
                        else np.zeros((1, 1, cfg.hidden_size), dtype=bfloat16)
                    ),
                    "V_cache": (
                        v_caches[i][:, 0 : prefill_slice_len * slice_index, :]
                        if slice_index
                        else np.zeros((1, 1, cfg.hidden_size), dtype=bfloat16)
                    ),
                    "indices": indices,
                    "input": data,
                    "mask": mask,
                }
                outputs = prefill_decoder_sessions[i].run(None, input_feed, shape_group=slice_index + 1)
                k_caches[i][
                    :,
                    slice_index * prefill_slice_len : slice_index * prefill_slice_len + remain_len,
                    :,
                ] = outputs[0][:, :remain_len, :]
                v_caches[i][
                    :,
                    slice_index * prefill_slice_len : slice_index * prefill_slice_len + remain_len,
                    :,
                ] = outputs[1][:, :remain_len, :]
                data = outputs[2]

            print("slice prefill done", slice_index)
        post_out = post_process_session.run(
            None,
            {
                "input": data[
                    :, token_len - (len(slice_indexs) - 1) * prefill_slice_len - 1, None, :
                ]
            }
        )[0]
        next_token, possible_tokens, possible_soft = post_process(post_out)
        possibles = [tokenizer.decode([t]) for t in possible_tokens]
        possible_soft_str = [str((t, s)) for t, s in zip(possibles, possible_soft)]
        token_ids.append(next_token)

    # set to decoder
    # Autoregressive decode: one token per step, reusing the K/V caches
    # filled during prefill.
    kv_cache_len = 2559
    mask = np.zeros((1, 1, kv_cache_len + 1), dtype=np.float32).astype(bfloat16)
    mask[:, :, :kv_cache_len] -= 65536
    if prefill_len > 0:
        mask[:, :, :token_len] = 0
    for start_indice in tqdm(range(kv_cache_len), desc="Decode"):
        if prefill_len > 0 and start_indice < token_len:
            continue

        next_token = token_ids[start_indice]
        indices = np.array([start_indice], np.uint32).reshape((1, 1))
        data = embeds[next_token, :].reshape((1, 1, cfg.hidden_size)).astype(bfloat16)
        for i in range(cfg.num_hidden_layers):
            input_feed = {
                "K_cache": k_caches[i],
                "V_cache": v_caches[i],
                "indices": indices,
                "input": data,
                "mask": mask,
            }
            outputs = prefill_decoder_sessions[i].run(None, input_feed, shape_group=0)
            k_caches[i][:, start_indice, :] = outputs[0][:, :, :]
            v_caches[i][:, start_indice, :] = outputs[1][:, :, :]
            data = outputs[2]
        mask[..., start_indice] = 0
        if start_indice < token_len - 1:
            pass
        else:
            post_out = post_process_session.run(None, {"input": data})[0]
            next_token, possible_tokens, possible_soft = post_process(post_out)
            token_ids.append(next_token)
            if next_token == tokenizer.eos_token_id and next_token > token_len:
                print("hit eos!")
                break

    # print result
    print(tokenizer.decode(token_ids[token_len:], skip_special_tokens=True))
internvl3_2b_axmodel/model.embed_tokens.weight.npy ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1282f7ccf6a9b08c85c71bcce4db867c6054ecca6c7147243e7337d0d0ab708a
size 931885184
internvl3_2b_axmodel/qwen2_p128_l0_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:566547f9b6a2218381356e0b823879e7bc5a65230e7892bf9b71f35fe5cb544e
size 81352456
internvl3_2b_axmodel/qwen2_p128_l10_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88fcff9f305a53c0c0b8fa22a78c8814e65584b7d8e410b6ee38e1d3167cb498
size 81352456
internvl3_2b_axmodel/qwen2_p128_l11_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c628f4771502574c1c2450da1eabea71afc02bee5cd32a9a6e401603ad4d504b
size 81352456
internvl3_2b_axmodel/qwen2_p128_l12_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9912b05faabed02d933000326a7b2dfe9a05f9d4f402892073849145bfe96e50
size 81352456
internvl3_2b_axmodel/qwen2_p128_l13_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b228900f882b9aea5f0fd410d3e59158cd24527aa0e3e8ac1d3d558645e3fd3d
size 81352456
internvl3_2b_axmodel/qwen2_p128_l14_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7cf8308b9204bd3473897e6d91598d8c4cb7485a4279829119329d2d36a9882
size 81352456
internvl3_2b_axmodel/qwen2_p128_l15_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c39a58dea42b1610070607c5a6d1d7bef8e16edae7183a271ad6f15fc765cab5
size 81352456
internvl3_2b_axmodel/qwen2_p128_l16_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae575dfe963704b034503e53a13df2d193baba8df35ccf3fb9ae6082ea95ab67
size 81352456
internvl3_2b_axmodel/qwen2_p128_l17_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fb23bc6883b8b88524b6d5b8f4f225596b9783c123a3e1b6bc4cc7ec49a98a1
size 81352456
internvl3_2b_axmodel/qwen2_p128_l18_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42eaacc99e87a3d9ebc88a5f12f83275285384f977d63e60dd142c4672e5c44a
size 81352456
internvl3_2b_axmodel/qwen2_p128_l19_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:949d0fe12d45244098cc772c3701f0e3173d12a98e3975488d6c16c57059b87e
size 81352456
internvl3_2b_axmodel/qwen2_p128_l1_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe657b85c98d4139f035675a27466bbc719a884817c0605b74ab95a10aeefe4a
size 81352456
internvl3_2b_axmodel/qwen2_p128_l20_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:42c7b094b5b693623e81c089da77f5d4f6e665d8262b61dbc1dd110920d719cf
size 81352456
internvl3_2b_axmodel/qwen2_p128_l21_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe905b027a9fc0378c9b8c2fad994fd6fe07b50a398ec4be75400efcf38000d1
size 81352456
internvl3_2b_axmodel/qwen2_p128_l22_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6748d3de2fc7db45d29e8cd0ab76b06404fa1967003dec6671d46ff7f9cdf0d1
size 81352456
internvl3_2b_axmodel/qwen2_p128_l23_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:397bcf99b84b4edcdb9ff8486a99197057c77665c6ca1516cd843c4e7d001e7b
size 81352456
internvl3_2b_axmodel/qwen2_p128_l24_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33c2091bb6a07d07093e44f1a6df4e262761281cff20ed7ec934e96c5ff1606d
size 81352456
internvl3_2b_axmodel/qwen2_p128_l25_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1cb522c7ce3e0252df76108e880ae74951aefd0d0733c36e5423a9ed3d750a6e
size 81352456
internvl3_2b_axmodel/qwen2_p128_l26_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6eea53085ef6d8099137ea9dd7419f3b5ef8714da3cda4096a5adfd82d80d180
size 81352456
internvl3_2b_axmodel/qwen2_p128_l27_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f76358d0e13a3eb480705a23e11189a0009d0bcd074573fb6d8506406c985bd7
size 81352456
internvl3_2b_axmodel/qwen2_p128_l2_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:016fd2f9b492b3f80d18a4508f3e862742cebbdb9ee0efa21c231fd213dea29c
size 81356136
internvl3_2b_axmodel/qwen2_p128_l3_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d1bcb6c77421d1e74589a43ed02b8c54fdedd61f97bc64a6a9eefb2bdc87ecb
size 81352456
internvl3_2b_axmodel/qwen2_p128_l4_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a6113220384ffcd4c44f852bdd8c800d1a414f5e4dc2424e6ed97f5e7db5e4d5
size 81352456
internvl3_2b_axmodel/qwen2_p128_l5_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66ccd095ba921ee9eda510f76836dca192b2827373279226a10447c7e0df54b4
size 81352456
internvl3_2b_axmodel/qwen2_p128_l6_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a21872e47517eafcb3c2a1ad0c8b959a627fd4e4dc7a106b35360438bf272c42
size 81352456
internvl3_2b_axmodel/qwen2_p128_l7_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20698b243c09c869985831c4d63c7d13dd26d194ff596726bbd1010f2882aab2
size 81352456
internvl3_2b_axmodel/qwen2_p128_l8_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:447476180d7784b36216c4ac67ba8028a8339a59721ca437145b68c7e95488fb
size 81352456
internvl3_2b_axmodel/qwen2_p128_l9_together.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9f17ec940ee1fac261db4bb3aa9d7c1e98f6c7faa477bd2739816cf82675d15
size 81352456
internvl3_2b_axmodel/qwen2_post.axmodel ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:056893588013d412e3b41049c0f87a7eae70670b1992d6f17507d50698b97753
size 254396183
internvl3_2b_tokenizer/added_tokens.json ADDED
@@ -0,0 +1,33 @@
{
  "</box>": 151673,
  "</img>": 151666,
  "</quad>": 151669,
  "</ref>": 151671,
  "</tool_call>": 151658,
  "<IMG_CONTEXT>": 151667,
  "<box>": 151672,
  "<img>": 151665,
  "<quad>": 151668,
  "<ref>": 151670,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
internvl3_2b_tokenizer/config.json ADDED
@@ -0,0 +1,224 @@
{
  "_commit_hash": null,
  "_name_or_path": "/mnt/petrelfs/wangweiyun/workspace_wwy/open_source/InternVL/internvl_chat/work_dirs/internvl_chat_v3_0/InternVL3_0-2B-MPO-try0-2",
  "architectures": [
    "InternVLChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "hidden_size": 1536,
  "image_fold": null,
  "llm_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "./pretrained/Qwen2.5-32B-Instruct",
    "add_cross_attention": false,
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": 151643,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 151643,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 1536,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 8960,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 32768,
    "max_window_layers": 70,
    "min_length": 0,
    "model_type": "qwen2",
    "moe_config": null,
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 28,
    "num_key_value_heads": 2,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "factor": 2.0,
      "rope_type": "dynamic",
      "type": "dynamic"
    },
    "rope_theta": 1000000.0,
    "sep_token_id": null,
    "sliding_window": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.48.3",
    "typical_p": 1.0,
    "use_bfloat16": true,
    "use_cache": false,
    "use_sliding_window": false,
    "vocab_size": 151674
  },
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "model_type": "internvl_chat",
  "pad2square": false,
  "ps_version": "v2",
  "select_layer": -1,
  "system_message": null,
  "template": "internvl2_5",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "OpenGVLab/InternViT-6B-448px-V1-5",
    "add_cross_attention": false,
    "architectures": [
      "InternVisionModel"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
      "AutoModel": "modeling_intern_vit.InternVisionModel"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "capacity_factor": 1.2,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.1,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "eval_capacity_factor": 1.4,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 448,
    "initializer_factor": 0.1,
    "initializer_range": 1e-10,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "laux_allreduce": "all_nodes",
    "layer_norm_eps": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "intern_vit_6b",
    "moe_coeff_ratio": 0.5,
    "moe_intermediate_size": 768,
    "moe_output_scale": 4.0,
    "no_repeat_ngram_size": 0,
    "noisy_gate_policy": "RSample_before",
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_experts": 8,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "num_routed_experts": 4,
    "num_shared_experts": 4,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qk_normalization": false,
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "shared_expert_intermediate_size": 3072,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.48.3",
    "typical_p": 1.0,
    "use_bfloat16": true,
    "use_flash_attn": true,
    "use_moe": false,
    "use_residual": true,
    "use_rts": false,
    "use_weighted_residual": false
  }
}
internvl3_2b_tokenizer/configuration_intern_vit.py ADDED
@@ -0,0 +1,120 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_6b'

    def __init__(
            self,
            num_channels=3,
            patch_size=14,
            image_size=224,
            qkv_bias=False,
            hidden_size=3200,
            num_attention_heads=25,
            intermediate_size=12800,
            qk_normalization=True,
            num_hidden_layers=48,
            use_flash_attn=True,
            hidden_act='gelu',
            norm_type='rms_norm',
            layer_norm_eps=1e-6,
            dropout=0.0,
            drop_path_rate=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=0.1,
            **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
internvl3_2b_tokenizer/configuration_internvl_chat.py ADDED
@@ -0,0 +1,97 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------

import copy

from transformers import AutoConfig, LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class InternVLChatConfig(PretrainedConfig):
    model_type = 'internvl_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            ps_version='v1',
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {'architectures': ['InternVisionModel']}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the llm config with default values (`Qwen2ForCausalLM`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config.get('architectures')[0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        # By default, we use tie_word_embeddings=False for models of all sizes.
        self.tie_word_embeddings = self.llm_config.tie_word_embeddings

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
internvl3_2b_tokenizer/conversation.py ADDED
@@ -0,0 +1,391 @@
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.

Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Dict, List, Tuple, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    INTERNVL_ZH = auto()
    MPT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = '{system_message}'
    # The system message
    system_message: str = ''
    # The names of two roles
    roles: Tuple[str] = ('USER', 'ASSISTANT')
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = '\n'
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ': '  # must be end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = '' if system_prompt == '' else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ': '
                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
                    )
                    ret += '\n\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = '[INST] '
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + ' '
                    else:
                        ret += tag + ' ' + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == 'chatglm2' else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ''

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'

                if message:
                    ret += f'{role}:{message}{self.sep}'
                else:
                    ret += f'{role}:'
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + message + self.sep + '\n'
                else:
                    ret += role + '\n'
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ''
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + '\n' + ' ' + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                # if i % 2 == 0:
                #     ret += "<s>"
                if message:
                    ret += role + ':' + message + seps[i % 2] + '\n'
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ':\n' + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += '\n\n'
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + '<s>' + message + '</s>'
                else:
                    ret += role + ': ' + '<s>'
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ':\n' + message + self.sep
                else:
                    ret += role + ':\n'
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ''
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ': ' + message + self.sep
                else:
                    ret += role + ':'

            return ret
        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
            seps = [self.sep, self.sep2]
            ret = self.system_message + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ': ' + message + seps[i % 2]
                else:
                    ret += role + ':'
            return ret
        elif self.sep_style == SeparatorStyle.MPT:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    ret += role + message + self.sep
                else:
                    ret += role
+ return ret
250
+ else:
251
+ raise ValueError(f'Invalid style: {self.sep_style}')
252
+
253
+ def set_system_message(self, system_message: str):
254
+ """Set the system message."""
255
+ self.system_message = system_message
256
+
257
+ def append_message(self, role: str, message: str):
258
+ """Append a new message."""
259
+ self.messages.append([role, message])
260
+
261
+ def update_last_message(self, message: str):
262
+ """Update the last output.
263
+
264
+ The last message is typically set to be None when constructing the prompt,
265
+ so we need to update it in-place after getting the response from a model.
266
+ """
267
+ self.messages[-1][1] = message
268
+
269
+ def to_gradio_chatbot(self):
270
+ """Convert the conversation to gradio chatbot format."""
271
+ ret = []
272
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
273
+ if i % 2 == 0:
274
+ ret.append([msg, None])
275
+ else:
276
+ ret[-1][-1] = msg
277
+ return ret
278
+
279
+ def to_openai_api_messages(self):
280
+ """Convert the conversation to OpenAI chat completion format."""
281
+ ret = [{'role': 'system', 'content': self.system_message}]
282
+
283
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
284
+ if i % 2 == 0:
285
+ ret.append({'role': 'user', 'content': msg})
286
+ else:
287
+ if msg is not None:
288
+ ret.append({'role': 'assistant', 'content': msg})
289
+ return ret
290
+
291
+ def copy(self):
292
+ return Conversation(
293
+ name=self.name,
294
+ system_template=self.system_template,
295
+ system_message=self.system_message,
296
+ roles=self.roles,
297
+ messages=[[x, y] for x, y in self.messages],
298
+ offset=self.offset,
299
+ sep_style=self.sep_style,
300
+ sep=self.sep,
301
+ sep2=self.sep2,
302
+ stop_str=self.stop_str,
303
+ stop_token_ids=self.stop_token_ids,
304
+ )
305
+
306
+ def dict(self):
307
+ return {
308
+ 'template_name': self.name,
309
+ 'system_message': self.system_message,
310
+ 'roles': self.roles,
311
+ 'messages': self.messages,
312
+ 'offset': self.offset,
313
+ }
314
+
315
+
316
+ # A global registry for all conversation templates
317
+ conv_templates: Dict[str, Conversation] = {}
318
+
319
+
320
+ def register_conv_template(template: Conversation, override: bool = False):
321
+ """Register a new conversation template."""
322
+ if not override:
323
+ assert (
324
+ template.name not in conv_templates
325
+ ), f'{template.name} has been registered.'
326
+
327
+ conv_templates[template.name] = template
328
+
329
+
330
+ def get_conv_template(name: str) -> Conversation:
331
+ """Get a conversation template."""
332
+ return conv_templates[name].copy()
333
+
334
+
335
+ # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
336
+ # is that during training, the preprocessing function for the Hermes-2 template doesn't add
337
+ # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
338
+ # Therefore, they are completely equivalent during inference.
339
+ register_conv_template(
340
+ Conversation(
341
+ name='Hermes-2',
342
+ system_template='<|im_start|>system\n{system_message}',
343
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
344
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
345
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
346
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
347
+ sep_style=SeparatorStyle.MPT,
348
+ sep='<|im_end|>',
349
+ stop_str='<|endoftext|>',
350
+ )
351
+ )
352
+
353
+
354
+ register_conv_template(
355
+ Conversation(
356
+ name='internlm2-chat',
357
+ system_template='<|im_start|>system\n{system_message}',
358
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
359
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
360
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
361
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
362
+ sep_style=SeparatorStyle.MPT,
363
+ sep='<|im_end|>',
364
+ )
365
+ )
366
+
367
+
368
+ register_conv_template(
369
+ Conversation(
370
+ name='phi3-chat',
371
+ system_template='<|system|>\n{system_message}',
372
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
373
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
374
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
375
+ roles=('<|user|>\n', '<|assistant|>\n'),
376
+ sep_style=SeparatorStyle.MPT,
377
+ sep='<|end|>',
378
+ )
379
+ )
380
+
381
+
382
+ register_conv_template(
383
+ Conversation(
384
+ name='internvl2_5',
385
+ system_template='<|im_start|>system\n{system_message}',
386
+ system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
387
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
388
+ sep_style=SeparatorStyle.MPT,
389
+ sep='<|im_end|>\n',
390
+ )
391
+ )
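
For readers of this diff, here is a minimal usage sketch (not part of the commit) of the template machinery above, assuming this file is importable as `conversation`:

# Build a ChatML-style prompt with the 'internvl2_5' template registered above.
from conversation import get_conv_template

template = get_conv_template('internvl2_5')
template.append_message(template.roles[0], 'Describe this image.')
template.append_message(template.roles[1], None)  # None leaves the assistant turn open
prompt = template.get_prompt()
# With SeparatorStyle.MPT this yields:
#   <|im_start|>system\n{system_message}<|im_end|>\n
#   <|im_start|>user\nDescribe this image.<|im_end|>\n
#   <|im_start|>assistant\n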
internvl3_2b_tokenizer/generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.48.3"
+ }
internvl3_2b_tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
internvl3_2b_tokenizer/modeling_intern_vit.py ADDED
@@ -0,0 +1,431 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from einops import rearrange
+ from timm.models.layers import DropPath
+ from torch import nn
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (BaseModelOutput,
+                                            BaseModelOutputWithPooling)
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+
+ try:
+     from flash_attn.bert_padding import pad_input, unpad_input
+     from flash_attn.flash_attn_interface import \
+         flash_attn_varlen_qkvpacked_func
+     has_flash_attn = True
+ except:
+     print('FlashAttention2 is not installed.')
+     has_flash_attn = False
+
+ logger = logging.get_logger(__name__)
+
+
+ class FlashAttention(nn.Module):
+     """Implement the scaled dot product attention with softmax.
+     Arguments
+     ---------
+         softmax_scale: The temperature to use for the softmax attention.
+                        (default: 1/sqrt(d_keys) where d_keys is computed at
+                        runtime)
+         attention_dropout: The dropout rate to apply to the attention
+                            (default: 0.0)
+     """
+
+     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
+         super().__init__()
+         self.softmax_scale = softmax_scale
+         self.dropout_p = attention_dropout
+
+     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
+                 max_s=None, need_weights=False):
+         """Implements the multihead softmax attention.
+         Arguments
+         ---------
+             qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
+                 if unpadded: (nnz, 3, h, d)
+             key_padding_mask: a bool tensor of shape (B, S)
+         """
+         assert not need_weights
+         assert qkv.dtype in [torch.float16, torch.bfloat16]
+         assert qkv.is_cuda
+
+         if cu_seqlens is None:
+             batch_size = qkv.shape[0]
+             seqlen = qkv.shape[1]
+             if key_padding_mask is None:
+                 qkv = rearrange(qkv, 'b s ... -> (b s) ...')
+                 max_s = seqlen
+                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
+                                           device=qkv.device)
+                 output = flash_attn_varlen_qkvpacked_func(
+                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
+             else:
+                 nheads = qkv.shape[-2]
+                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
+                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
+                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
+                 output_unpad = flash_attn_varlen_qkvpacked_func(
+                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
+                                              indices, batch_size, seqlen),
+                                    'b s (h d) -> b s h d', h=nheads)
+         else:
+             assert max_s is not None
+             output = flash_attn_varlen_qkvpacked_func(
+                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                 softmax_scale=self.softmax_scale, causal=causal
+             )
+
+         return output, None
+
+
+ class InternRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ try:
+     from apex.normalization import FusedRMSNorm
+
+     InternRMSNorm = FusedRMSNorm  # noqa
+
+     logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
+ except ImportError:
+     # using the normal InternRMSNorm
+     pass
+ except Exception:
+     logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
+     pass
+
+
+ NORM2FN = {
+     'rms_norm': InternRMSNorm,
+     'layer_norm': nn.LayerNorm,
+ }
+
+
+ class InternVisionEmbeddings(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.image_size = config.image_size
+         self.patch_size = config.patch_size
+
+         self.class_embedding = nn.Parameter(
+             torch.randn(1, 1, self.embed_dim),
+         )
+
+         self.patch_embedding = nn.Conv2d(
+             in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+         )
+
+         self.num_patches = (self.image_size // self.patch_size) ** 2
+         self.num_positions = self.num_patches + 1
+
+         self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+     def _get_pos_embed(self, pos_embed, H, W):
+         target_dtype = pos_embed.dtype
+         pos_embed = pos_embed.float().reshape(
+             1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
+         pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
+             reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
+         return pos_embed
+
+     def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+         target_dtype = self.patch_embedding.weight.dtype
+         patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
+         batch_size, _, height, width = patch_embeds.shape
+         patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+         class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+         embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+         position_embedding = torch.cat([
+             self.position_embedding[:, :1, :],
+             self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
+         ], dim=1)
+         embeddings = embeddings + position_embedding.to(target_dtype)
+         return embeddings
+
+
+ class InternAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.use_flash_attn = config.use_flash_attn and has_flash_attn
+         if config.use_flash_attn and not has_flash_attn:
+             print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
+         self.head_dim = self.embed_dim // self.num_heads
+         if self.head_dim * self.num_heads != self.embed_dim:
+             raise ValueError(
+                 f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
+                 f' {self.num_heads}).'
+             )
+
+         self.scale = self.head_dim ** -0.5
+         self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
+         self.attn_drop = nn.Dropout(config.attention_dropout)
+         self.proj_drop = nn.Dropout(config.dropout)
+
+         self.qk_normalization = config.qk_normalization
+
+         if self.qk_normalization:
+             self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+             self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+         if self.use_flash_attn:
+             self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
+         self.proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+     def _naive_attn(self, x):
+         B, N, C = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
+
+         if self.qk_normalization:
+             B_, H_, N_, D_ = q.shape
+             q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+             k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+
+         attn = ((q * self.scale) @ k.transpose(-2, -1))
+         attn = attn.softmax(dim=-1)
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+     def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
+         qkv = self.qkv(x)
+         qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
+
+         if self.qk_normalization:
+             q, k, v = qkv.unbind(2)
+             q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
+             k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
+             qkv = torch.stack([q, k, v], dim=2)
+
+         context, _ = self.inner_attn(
+             qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
+         )
+         outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
+         outs = self.proj_drop(outs)
+         return outs
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
+         return x
+
+
+ class InternMLP(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.act = ACT2FN[config.hidden_act]
+         self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+         self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.fc1(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.fc2(hidden_states)
+         return hidden_states
+
+
+ class InternVisionEncoderLayer(nn.Module):
+     def __init__(self, config: InternVisionConfig, drop_path_rate: float):
+         super().__init__()
+         self.embed_dim = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.norm_type = config.norm_type
+
+         self.attn = InternAttention(config)
+         self.mlp = InternMLP(config)
+         self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
+         self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
+
+         self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+         self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+     ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
+         """
+         Args:
+             hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
+         """
+         hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)
+
+         hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)
+
+         return hidden_states
+
+
+ class InternVisionEncoder(nn.Module):
+     """
+     Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+     [`InternEncoderLayer`].
+
+     Args:
+         config (`InternConfig`):
+             The corresponding vision configuration for the `InternEncoder`.
+     """
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         # stochastic depth decay rule
+         dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
+         self.layers = nn.ModuleList([
+             InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = True
+
+     def forward(
+         self,
+         inputs_embeds,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutput]:
+         r"""
+         Args:
+             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                 Embedded representation of the inputs. Should be float, not int tokens.
+             output_hidden_states (`bool`, *optional*):
+                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                 for more detail.
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         """
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         encoder_states = () if output_hidden_states else None
+         hidden_states = inputs_embeds
+
+         for idx, encoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 encoder_states = encoder_states + (hidden_states,)
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     encoder_layer,
+                     hidden_states)
+             else:
+                 layer_outputs = encoder_layer(
+                     hidden_states,
+                 )
+             hidden_states = layer_outputs
+
+         if output_hidden_states:
+             encoder_states = encoder_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, encoder_states] if v is not None)
+         return BaseModelOutput(
+             last_hidden_state=hidden_states, hidden_states=encoder_states
+         )
+
+
+ class InternVisionModel(PreTrainedModel):
+     main_input_name = 'pixel_values'
+     _supports_flash_attn_2 = True
+     supports_gradient_checkpointing = True
+     config_class = InternVisionConfig
+     _no_split_modules = ['InternVisionEncoderLayer']
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = InternVisionEmbeddings(config)
+         self.encoder = InternVisionEncoder(config)
+
+     def resize_pos_embeddings(self, old_size, new_size, patch_size):
+         pos_emb = self.embeddings.position_embedding
+         _, num_positions, embed_dim = pos_emb.shape
+         cls_emb = pos_emb[:, :1, :]
+         pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
+         pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
+         pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
+         pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
+         self.embeddings.position_embedding = nn.Parameter(pos_emb)
+         self.embeddings.image_size = new_size
+         logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def forward(
+         self,
+         pixel_values: Optional[torch.FloatTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         pixel_embeds: Optional[torch.FloatTensor] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPooling]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None and pixel_embeds is None:
+             raise ValueError('You have to specify pixel_values or pixel_embeds')
+
+         if pixel_embeds is not None:
+             hidden_states = pixel_embeds
+         else:
+             if len(pixel_values.shape) == 4:
+                 hidden_states = self.embeddings(pixel_values)
+             else:
+                 raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
+         encoder_outputs = self.encoder(
+             inputs_embeds=hidden_states,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         last_hidden_state = encoder_outputs.last_hidden_state
+         pooled_output = last_hidden_state[:, 0, :]
+
+         if not return_dict:
+             return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=last_hidden_state,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
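
As a quick orientation for this file, a sketch of running the vision tower standalone; the constructor arguments here are assumptions for illustration (the authoritative values live in this repo's config.json):

import torch
from configuration_intern_vit import InternVisionConfig
from modeling_intern_vit import InternVisionModel

config = InternVisionConfig(image_size=448, patch_size=14)  # assumed 448px / patch-14 setup
model = InternVisionModel(config).eval()
pixel_values = torch.randn(1, 3, 448, 448)
with torch.no_grad():
    out = model(pixel_values=pixel_values, return_dict=True)
# One CLS token plus (448 // 14) ** 2 = 1024 patch tokens per image.
print(out.last_hidden_state.shape)  # torch.Size([1, 1025, hidden_size])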
internvl3_2b_tokenizer/modeling_internvl_chat.py ADDED
@@ -0,0 +1,359 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import warnings
+ from typing import List, Optional, Tuple, Union
+
+ import torch.utils.checkpoint
+ import transformers
+ from torch import nn
+ from torch.nn import CrossEntropyLoss
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
+                           Qwen2ForCausalLM)
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import ModelOutput, logging
+
+ from .configuration_internvl_chat import InternVLChatConfig
+ from .conversation import get_conv_template
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
+
+ logger = logging.get_logger(__name__)
+
+
+ def version_cmp(v1, v2, op='eq'):
+     import operator
+
+     from packaging import version
+     op_func = getattr(operator, op)
+     return op_func(version.parse(v1), version.parse(v2))
+
+
+ class InternVLChatModel(PreTrainedModel):
+     config_class = InternVLChatConfig
+     main_input_name = 'pixel_values'
+     base_model_prefix = 'language_model'
+     _supports_flash_attn_2 = True
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'Qwen2DecoderLayer']
+
+     def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
+         super().__init__(config)
+
+         assert version_cmp(transformers.__version__, '4.37.0', 'ge')
+         image_size = config.force_image_size or config.vision_config.image_size
+         patch_size = config.vision_config.patch_size
+         self.patch_size = patch_size
+         self.select_layer = config.select_layer
+         self.template = config.template
+         self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
+         self.downsample_ratio = config.downsample_ratio
+         self.ps_version = config.ps_version
+         use_flash_attn = use_flash_attn if has_flash_attn else False
+         config.vision_config.use_flash_attn = True if use_flash_attn else False
+         config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
+
+         logger.info(f'num_image_token: {self.num_image_token}')
+         logger.info(f'ps_version: {self.ps_version}')
+         if vision_model is not None:
+             self.vision_model = vision_model
+         else:
+             self.vision_model = InternVisionModel(config.vision_config)
+         if language_model is not None:
+             self.language_model = language_model
+         else:
+             if config.llm_config.architectures[0] == 'LlamaForCausalLM':
+                 self.language_model = LlamaForCausalLM(config.llm_config)
+             elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
+                 self.language_model = Qwen2ForCausalLM(config.llm_config)
+             else:
+                 raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
+
+         vit_hidden_size = config.vision_config.hidden_size
+         llm_hidden_size = config.llm_config.hidden_size
+
+         self.mlp1 = nn.Sequential(
+             nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
+             nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
+             nn.GELU(),
+             nn.Linear(llm_hidden_size, llm_hidden_size)
+         )
+
+         self.img_context_token_id = None
+         self.conv_template = get_conv_template(self.template)
+         self.system_message = self.conv_template.system_message
+
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         image_flags: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         image_flags = image_flags.squeeze(-1)
+         input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
+
+         vit_embeds = self.extract_feature(pixel_values)
+         vit_embeds = vit_embeds[image_flags == 1]
+         vit_batch_size = pixel_values.shape[0]
+
+         B, N, C = input_embeds.shape
+         input_embeds = input_embeds.reshape(B * N, C)
+
+         if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
+             print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
+
+         input_ids = input_ids.reshape(B * N)
+         selected = (input_ids == self.img_context_token_id)
+         try:
+             input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
+         except Exception as e:
+             vit_embeds = vit_embeds.reshape(-1, C)
+             print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
+                   f'vit_embeds.shape={vit_embeds.shape}')
+             n_token = selected.sum()
+             input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
+
+         input_embeds = input_embeds.reshape(B, N, C)
+
+         outputs = self.language_model(
+             inputs_embeds=input_embeds,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         logits = outputs.logits
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
+             shift_labels = shift_labels.view(-1)
+             # Enable model parallelism
+             shift_labels = shift_labels.to(shift_logits.device)
+             loss = loss_fct(shift_logits, shift_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def pixel_shuffle(self, x, scale_factor=0.5):
+         n, w, h, c = x.size()
+         # N, W, H, C --> N, W, H * scale, C // scale
+         x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
+         # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
+         x = x.permute(0, 2, 1, 3).contiguous()
+         # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
+         x = x.view(n, int(h * scale_factor), int(w * scale_factor),
+                    int(c / (scale_factor * scale_factor)))
+         if self.ps_version == 'v1':
+             warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
+                           'which results in a transposed image.')
+         else:
+             x = x.permute(0, 2, 1, 3).contiguous()
+         return x
+
+     def extract_feature(self, pixel_values):
+         if self.select_layer == -1:
+             vit_embeds = self.vision_model(
+                 pixel_values=pixel_values,
+                 output_hidden_states=False,
+                 return_dict=True).last_hidden_state
+         else:
+             vit_embeds = self.vision_model(
+                 pixel_values=pixel_values,
+                 output_hidden_states=True,
+                 return_dict=True).hidden_states[self.select_layer]
+         vit_embeds = vit_embeds[:, 1:, :]
+
+         h = w = int(vit_embeds.shape[1] ** 0.5)
+         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
+         vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
+         vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
+         vit_embeds = self.mlp1(vit_embeds)
+         return vit_embeds
+
+     def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
+                    history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
+                    IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
+         if history is not None or return_history:
+             print('Multi-turn chat is not supported in batch_chat yet.')
+             raise NotImplementedError
+
+         if image_counts is not None:
+             num_patches_list = image_counts
+             print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
+
+         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
+         self.img_context_token_id = img_context_token_id
+
+         if verbose and pixel_values is not None:
+             image_bs = pixel_values.shape[0]
+             print(f'dynamic ViT batch size: {image_bs}')
+
+         queries = []
+         for idx, num_patches in enumerate(num_patches_list):
+             question = questions[idx]
+             if pixel_values is not None and '<image>' not in question:
+                 question = '<image>\n' + question
+             template = get_conv_template(self.template)
+             template.system_message = self.system_message
+             template.append_message(template.roles[0], question)
+             template.append_message(template.roles[1], None)
+             query = template.get_prompt()
+
+             image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
+             query = query.replace('<image>', image_tokens, 1)
+             queries.append(query)
+
+         tokenizer.padding_side = 'left'
+         model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
+         input_ids = model_inputs['input_ids'].to(self.device)
+         attention_mask = model_inputs['attention_mask'].to(self.device)
+         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
+         generation_config['eos_token_id'] = eos_token_id
+         generation_output = self.generate(
+             pixel_values=pixel_values,
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             **generation_config
+         )
+         responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
+         responses = [response.split(template.sep.strip())[0].strip() for response in responses]
+         return responses
+
+     def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
+              num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
+              verbose=False):
+
+         if history is None and pixel_values is not None and '<image>' not in question:
+             question = '<image>\n' + question
+
+         if num_patches_list is None:
+             num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
+         assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
+
+         img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
+         self.img_context_token_id = img_context_token_id
+
+         template = get_conv_template(self.template)
+         template.system_message = self.system_message
+         eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
+
+         history = [] if history is None else history
+         for (old_question, old_answer) in history:
+             template.append_message(template.roles[0], old_question)
+             template.append_message(template.roles[1], old_answer)
+         template.append_message(template.roles[0], question)
+         template.append_message(template.roles[1], None)
+         query = template.get_prompt()
+
+         if verbose and pixel_values is not None:
+             image_bs = pixel_values.shape[0]
+             print(f'dynamic ViT batch size: {image_bs}')
+
+         for num_patches in num_patches_list:
+             image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
+             query = query.replace('<image>', image_tokens, 1)
+
+         model_inputs = tokenizer(query, return_tensors='pt')
+         input_ids = model_inputs['input_ids'].to(self.device)
+         attention_mask = model_inputs['attention_mask'].to(self.device)
+         generation_config['eos_token_id'] = eos_token_id
+         generation_output = self.generate(
+             pixel_values=pixel_values,
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             **generation_config
+         )
+         response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
+         response = response.split(template.sep.strip())[0].strip()
+         history.append((question, response))
+         if return_history:
+             return response, history
+         else:
+             query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
+             query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
+             if verbose:
+                 print(query_to_print, response)
+             return response
+
+     @torch.no_grad()
+     def generate(
+         self,
+         pixel_values: Optional[torch.FloatTensor] = None,
+         input_ids: Optional[torch.FloatTensor] = None,
+         attention_mask: Optional[torch.LongTensor] = None,
+         visual_features: Optional[torch.FloatTensor] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         output_hidden_states: Optional[bool] = None,
+         **generate_kwargs,
+     ) -> torch.LongTensor:
+
+         assert self.img_context_token_id is not None
+         if pixel_values is not None:
+             if visual_features is not None:
+                 vit_embeds = visual_features
+             else:
+                 vit_embeds = self.extract_feature(pixel_values)
+             input_embeds = self.language_model.get_input_embeddings()(input_ids)
+             B, N, C = input_embeds.shape
+             input_embeds = input_embeds.reshape(B * N, C)
+
+             input_ids = input_ids.reshape(B * N)
+             selected = (input_ids == self.img_context_token_id)
+             assert selected.sum() != 0
+             input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
+
+             input_embeds = input_embeds.reshape(B, N, C)
+         else:
+             input_embeds = self.language_model.get_input_embeddings()(input_ids)
+
+         outputs = self.language_model.generate(
+             inputs_embeds=input_embeds,
+             attention_mask=attention_mask,
+             generation_config=generation_config,
+             output_hidden_states=output_hidden_states,
+             use_cache=True,
+             **generate_kwargs,
+         )
+
+         return outputs
+
+     @property
+     def lm_head(self):
+         return self.language_model.get_output_embeddings()
+
+     def get_input_embeddings(self):
+         return self.language_model.get_input_embeddings()
+
+     def get_output_embeddings(self):
+         return self.language_model.get_output_embeddings()
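
A short worked example of the token budget implied by `num_image_token` and `pixel_shuffle` above; the 448 / 14 / 0.5 values are the usual InternVL defaults, assumed here rather than read from this repo's config.json:

image_size, patch_size, downsample_ratio = 448, 14, 0.5  # assumed defaults
patch_tokens = (image_size // patch_size) ** 2            # 32 * 32 = 1024 ViT patch tokens
num_image_token = int(patch_tokens * downsample_ratio ** 2)
print(num_image_token)  # 256 <IMG_CONTEXT> placeholders per image tile

pixel_shuffle trades spatial resolution for channel depth without discarding information, which is why the mlp1 projector above takes vit_hidden_size * int(1 / downsample_ratio) ** 2 input features.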
internvl3_2b_tokenizer/preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "crop_size": 448,
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "size": 448
+ }
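
The same preprocessing expressed as a torchvision pipeline, as a sketch only (`resample: 3` is PIL's BICUBIC, and the mean/std values are the ImageNet statistics listed above; the repo's own infer.py may differ in detail):

import torchvision.transforms as T
from torchvision.transforms import InterpolationMode

transform = T.Compose([
    T.Resize((448, 448), interpolation=InterpolationMode.BICUBIC),  # "size" / "resample": 3
    T.CenterCrop(448),                                              # "do_center_crop" / "crop_size"
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])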
internvl3_2b_tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
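
A quick sanity-check sketch for the mappings above (assumes `transformers` is installed and `internvl3_2b_tokenizer/` is the local path of this folder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('internvl3_2b_tokenizer', trust_remote_code=True)
print(tok.eos_token)  # '<|im_end|>', so generation stops at the ChatML end tag
print(tok.pad_token)  # '<|endoftext|>'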