rkazants committed (verified)
Commit a96cf1a · 1 Parent(s): 46c96a2

Upload 18 files
.gitattributes CHANGED
@@ -1,35 +1,36 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: mit
+ ---
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
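
These ids extend the Qwen2 vocabulary with the chat, tool-call, and vision placeholder tokens. As a quick sanity check, a minimal sketch (assuming a local checkout of this repo and an installed transformers; the "." path is a placeholder):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(".")  # placeholder path to a local checkout of this repo
    print(tokenizer.convert_tokens_to_ids("<|im_start|>"))  # expected 151644 per added_tokens.json
    print(tokenizer.convert_tokens_to_ids("<|im_end|>"))    # expected 151645
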
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
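
The template renders the standard ChatML layout, with an optional tools preamble and <tool_call>/<tool_response> handling. A minimal rendering sketch (assumes a local checkout of this repo; the messages are illustrative):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(".")  # placeholder path to a local checkout of this repo
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Describe the video."},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # prompt ends with "<|im_start|>assistant\n", ready for generation
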
config.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "architectures": [
+     "VideoChatFlashQwenForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "modeling_videochat_flash.VideoChatFlashQwenConfig",
+     "AutoModel": "modeling_videochat_flash.VideoChatFlashQwenForCausalLM"
+   },
+   "bos_token_id": 151643,
+   "dtype": "float16",
+   "eos_token_id": 151645,
+   "frame_aspect_ratio": "square",
+   "frame_grid_pinpoints": null,
+   "hidden_act": "silu",
+   "hidden_size": 64,
+   "image_aspect_ratio": "square",
+   "image_crop_resolution": null,
+   "image_grid_pinpoints": [
+     [
+       224,
+       224
+     ]
+   ],
+   "image_split_resolution": null,
+   "initializer_range": 0.02,
+   "intermediate_size": 128,
+   "layer_types": [
+     "full_attention",
+     "full_attention"
+   ],
+   "llm_compress_layer_list": [],
+   "llm_compress_type": "attention",
+   "llm_image_token_ratio_list": [
+     1.0,
+     0.5,
+     0.25,
+     0.125
+   ],
+   "max_num_pixels": 401408,
+   "max_position_embeddings": 2048,
+   "max_window_layers": 2,
+   "min_slow_num_frames": 4,
+   "mm_close_init": false,
+   "mm_hidden_size": 128,
+   "mm_llm_compress": false,
+   "mm_local_num_frames": 4,
+   "mm_newline_position": "nothing",
+   "mm_num_compress_latents": 8,
+   "mm_num_compress_query_type": "learnable",
+   "mm_patch_merge_type": "spatial_nopad",
+   "mm_pos_num_frames": 4,
+   "mm_projector_lr": null,
+   "mm_projector_type": "tome16_mlp_hd64",
+   "mm_resampler_type": null,
+   "mm_spatial_pool_mode": "bilinear",
+   "mm_tunable_parts": "mm_vision_tower,mm_mlp_adapter,mm_language_model",
+   "mm_use_im_patch_token": false,
+   "mm_use_im_start_end": false,
+   "mm_vision_select_feature": "patch",
+   "mm_vision_select_layer": -2,
+   "mm_vision_tower": "internvideo2",
+   "mm_vision_tower_lr": 2e-06,
+   "model_type": "videochat_flash_qwen",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 2,
+   "num_key_value_heads": 1,
+   "pos_skipping_range": 2048,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "tokenizer_model_max_length": 2048,
+   "tokenizer_padding_side": "right",
+   "transformers_version": "4.57.6",
+   "use_cache": true,
+   "use_mm_proj": true,
+   "use_pos_skipping": false,
+   "use_sliding_window": false,
+   "vision_encode_type": "video_image",
+   "vision_tower_pretrained": null,
+   "vocab_size": 152064
+ }
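
Because auto_map points at custom modeling code shipped with the repo, loading has to go through trust_remote_code. A loading sketch (assumes a local checkout with its Python dependencies installed; the "." path is a placeholder):

    import torch
    from transformers import AutoConfig, AutoModel

    config = AutoConfig.from_pretrained(".", trust_remote_code=True)
    print(config.model_type, config.hidden_size)  # videochat_flash_qwen 64
    model = AutoModel.from_pretrained(".", trust_remote_code=True, torch_dtype=torch.float16)

The very small sizes (hidden_size 64, 2 layers, 4 heads) suggest a reduced test configuration rather than a full VideoChat-Flash checkpoint.
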
constants.py ADDED
@@ -0,0 +1,12 @@
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
+ WORKER_HEART_BEAT_INTERVAL = 15
+
+ LOGDIR = "."
+
+ # Model Constants
+ IGNORE_INDEX = -100
+ IMAGE_TOKEN_INDEX = -200
+ DEFAULT_IMAGE_TOKEN = "<image>"
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+ DEFAULT_IM_START_TOKEN = "<im_start>"
+ DEFAULT_IM_END_TOKEN = "<im_end>"
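
IMAGE_TOKEN_INDEX is a sentinel id spliced into the input ids wherever DEFAULT_IMAGE_TOKEN appears in the prompt; the model later swaps it for projected visual features. A simplified stand-in for the usual tokenizer_image_token helper, assuming the constants above are in scope (a sketch, not the repo's exact implementation):

    def insert_image_tokens(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX):
        # Tokenize the text around each "<image>" placeholder and join the
        # chunks with the sentinel id instead of a real vocabulary id.
        chunks = [tokenizer(c).input_ids for c in prompt.split(DEFAULT_IMAGE_TOKEN)]
        input_ids = chunks[0]
        for chunk in chunks[1:]:
            input_ids = input_ids + [image_token_index] + chunk
        return input_ids
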
conversation.py ADDED
@@ -0,0 +1,592 @@
1
+ import dataclasses
2
+ from enum import auto, Enum
3
+ from typing import List, Any, Dict, Union, Tuple
4
+ import re
5
+ import base64
6
+ from io import BytesIO
7
+ from PIL import Image
8
+ from transformers import AutoTokenizer
9
+
10
+
11
+ class SeparatorStyle(Enum):
12
+ """Different separator style."""
13
+
14
+ SINGLE = auto()
15
+ TWO = auto()
16
+ MPT = auto()
17
+ PLAIN = auto()
18
+ CHATML = auto()
19
+ LLAMA_2 = auto()
20
+ LLAMA_3 = auto()
21
+ QWEN = auto()
22
+ GEMMA = auto()
23
+
24
+
25
+ @dataclasses.dataclass
26
+ class Conversation:
27
+ """A class that keeps all conversation history."""
28
+
29
+ system: str
30
+ roles: List[str]
31
+ messages: List[List[str]]
32
+ offset: int
33
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
34
+ sep: str = "###"
35
+ sep2: str = None
36
+ version: str = "Unknown"
37
+
38
+ tokenizer_id: str = ""
39
+ tokenizer: Any = None
40
+ # Stop criteria (the default one is EOS token)
41
+ stop_str: Union[str, List[str]] = None
42
+ # Stops generation if meeting any token in this list
43
+ stop_token_ids: List[int] = None
44
+
45
+ skip_next: bool = False
46
+
47
+ def get_prompt(self):
48
+ messages = self.messages
49
+ if len(messages) > 0 and type(messages[0][1]) is tuple:
50
+ messages = self.messages.copy()
51
+ init_role, init_msg = messages[0].copy()
52
+ init_msg = init_msg[0]
53
+ if "mmtag" in self.version:
54
+ init_msg = init_msg.replace("<image>", "").strip()
55
+ messages[0] = (init_role, init_msg)
56
+ messages.insert(0, (self.roles[0], "<Image><image></Image>"))
57
+ messages.insert(1, (self.roles[1], "Received."))
58
+ elif not init_msg.startswith("<image>"):
59
+ init_msg = init_msg.replace("<image>", "").strip()
60
+ messages[0] = (init_role, "<image>\n" + init_msg)
61
+ else:
62
+ messages[0] = (init_role, init_msg)
63
+
64
+ if self.sep_style == SeparatorStyle.SINGLE:
65
+ ret = self.system + self.sep
66
+ for role, message in messages:
67
+ if message:
68
+ if type(message) is tuple:
69
+ message, _, _ = message
70
+ ret += role + ": " + message + self.sep
71
+ else:
72
+ ret += role + ":"
73
+
74
+ elif self.sep_style == SeparatorStyle.TWO:
75
+ seps = [self.sep, self.sep2]
76
+ ret = self.system + seps[0]
77
+ for i, (role, message) in enumerate(messages):
78
+ if message:
79
+ if type(message) is tuple:
80
+ message, _, _ = message
81
+ ret += role + ": " + message + seps[i % 2]
82
+ else:
83
+ ret += role + ":"
84
+
85
+ elif self.sep_style == SeparatorStyle.CHATML:
86
+ ret = "" if self.system == "" else self.system + self.sep + "\n"
87
+ for role, message in messages:
88
+ if message:
89
+ if type(message) is tuple:
90
+ message, images, _ = message
91
+ message = "<image>" * len(images) + message
92
+ ret += role + "\n" + message + self.sep + "\n"
93
+ else:
94
+ ret += role + "\n"
95
+ return ret
96
+
97
+ elif self.sep_style == SeparatorStyle.LLAMA_3:
98
+ chat_template_messages = [{"role": "system", "content": self.system}]
99
+ for role, message in messages:
100
+ if message:
101
+ if type(message) is tuple:
102
+ message, images = message
103
+ message = "<image>" * len(images) + message
104
+ chat_template_messages.append({"role": role, "content": message})
105
+
106
+ # print(chat_template_messages)
107
+ return self.tokenizer.apply_chat_template(chat_template_messages, tokenize=False, add_generation_prompt=True)
108
+ # ret = "" if self.system == "" else self.system + self.sep + "\n"
109
+ # for role, message in messages:
110
+ # if message:
111
+ # if type(message) is tuple:
112
+ # message, images = message
113
+ # message = "<image>" * len(images) + message
114
+ # ret += role + "\n" + message + self.sep + "\n"
115
+ # else:
116
+ # ret += role + "\n"
117
+ # return ret
118
+
119
+ elif self.sep_style == SeparatorStyle.MPT:
120
+ ret = self.system + self.sep
121
+ for role, message in messages:
122
+ if message:
123
+ if type(message) is tuple:
124
+ message, _, _ = message
125
+ ret += role + message + self.sep
126
+ else:
127
+ ret += role
128
+
129
+ elif self.sep_style == SeparatorStyle.GEMMA:
130
+ ret = ""
131
+ for i, (role, message) in enumerate(messages):
132
+ assert role == self.roles[i % 2], "Conversation should alternate user/assistant/user/assistant/..."
133
+ if message:
134
+ if type(message) is tuple:
135
+ message, _, _ = message
136
+ ret += role + message + self.sep
137
+ else:
138
+ ret += role
139
+
140
+ elif self.sep_style == SeparatorStyle.LLAMA_2:
141
+ wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
142
+ wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
143
+ ret = ""
144
+
145
+ for i, (role, message) in enumerate(messages):
146
+ if i == 0:
147
+ assert message, "first message should not be none"
148
+ assert role == self.roles[0], "first message should come from user"
149
+ if message:
150
+ if type(message) is tuple:
151
+ message, _, _ = message
152
+ if i == 0:
153
+ message = wrap_sys(self.system) + message
154
+ if i % 2 == 0:
155
+ message = wrap_inst(message)
156
+ ret += self.sep + message
157
+ else:
158
+ ret += " " + message + " " + self.sep2
159
+ else:
160
+ ret += ""
161
+ ret = ret.lstrip(self.sep)
162
+
163
+ elif self.sep_style == SeparatorStyle.PLAIN:
164
+ seps = [self.sep, self.sep2]
165
+ ret = self.system
166
+ for i, (role, message) in enumerate(messages):
167
+ if message:
168
+ if type(message) is tuple:
169
+ message, _, _ = message
170
+ ret += message + seps[i % 2]
171
+ else:
172
+ ret += ""
173
+ else:
174
+ raise ValueError(f"Invalid style: {self.sep_style}")
175
+
176
+ return ret
177
+
178
+ def append_message(self, role, message):
179
+ self.messages.append([role, message])
180
+
181
+ def process_image(self, image, image_process_mode, return_pil=False, image_format="PNG"):
182
+ if image_process_mode == "Pad":
183
+
184
+ def expand2square(pil_img, background_color=(122, 116, 104)):
185
+ width, height = pil_img.size
186
+ if width == height:
187
+ return pil_img
188
+ elif width > height:
189
+ result = Image.new(pil_img.mode, (width, width), background_color)
190
+ result.paste(pil_img, (0, (width - height) // 2))
191
+ return result
192
+ else:
193
+ result = Image.new(pil_img.mode, (height, height), background_color)
194
+ result.paste(pil_img, ((height - width) // 2, 0))
195
+ return result
196
+
197
+ image = expand2square(image)
198
+ elif image_process_mode in ["Default", "Crop"]:
199
+ pass
200
+ elif image_process_mode == "Resize":
201
+ image = image.resize((336, 336))
202
+ else:
203
+ raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
204
+
205
+ if type(image) is not Image.Image:
206
+ image = Image.open(image).convert("RGB")
207
+
208
+ max_hw, min_hw = max(image.size), min(image.size)
209
+ aspect_ratio = max_hw / min_hw
210
+ max_len, min_len = 672, 448
211
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
212
+ longest_edge = int(shortest_edge * aspect_ratio)
213
+ W, H = image.size
214
+ if H > W:
215
+ H, W = longest_edge, shortest_edge
216
+ else:
217
+ H, W = shortest_edge, longest_edge
218
+ image = image.resize((W, H))
219
+ if return_pil:
220
+ return image
221
+ else:
222
+ buffered = BytesIO()
223
+ image.save(buffered, format=image_format)
224
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
225
+ return img_b64_str
226
+
227
+ def get_images(self, return_pil=False, return_path=False):
228
+ images = []
229
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
230
+ if i % 2 == 0:
231
+ if type(msg) is tuple:
232
+ msg, image, image_process_mode = msg
233
+ if type(image) != list:
234
+ image = [image]
235
+ for img in image:
236
+ if not return_path and self.is_image_file(img):
237
+ img = self.process_image(img, image_process_mode, return_pil=return_pil)
238
+ else:
239
+ images.append(img)
240
+ return images
241
+
242
+ def is_image_file(self, filename):
243
+ image_extensions = [".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp"]
244
+ return any(filename.lower().endswith(ext) for ext in image_extensions)
245
+
246
+ def is_video_file(self, filename):
247
+ video_extensions = [".mp4", ".mov", ".avi", ".mkv", ".wmv", ".flv", ".mpeg", ".mpg"]
248
+ return any(filename.lower().endswith(ext) for ext in video_extensions)
249
+
250
+ def to_gradio_chatbot(self):
251
+ ret = []
252
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
253
+ if i % 2 == 0:
254
+ if type(msg) is tuple:
255
+ msg, image, image_process_mode = msg
256
+ if type(image) != list:
257
+ image = [image]
258
+ if len(image) == 1:
259
+ msg = "<image>\n" + msg.replace("<image>", "").strip()
260
+ else:
261
+ msg = re.sub(r"(<image>)\n(?=<image>)", r"\1 ", msg)
262
+
263
+ img_str_list = []
264
+ for img in image:
265
+ if self.is_image_file(img):
266
+ img_b64_str = self.process_image(img, "Default", return_pil=False, image_format="JPEG")
267
+ img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" style="max-width: 256px; max-height: 256px; width: auto; height: auto; object-fit: contain;"/>'
268
+ img_str_list.append(img_str)
269
+ elif self.is_video_file(img):
270
+ ret.append(((img,), None))
271
+
272
+ msg = msg.strip()
273
+ img_place_holder = ""
274
+ for img_str in img_str_list:
275
+ img_place_holder += f"{img_str}\n\n"
276
+
277
+ if len(img_str_list) > 0:
278
+ msg = f"{img_place_holder}\n\n{msg}"
279
+
280
+ if len(msg) > 0:
281
+ ret.append([msg, None])
282
+ else:
283
+ ret.append([msg, None])
284
+ else:
285
+ ret[-1][-1] = msg
286
+ return ret
287
+
288
+ def copy(self):
289
+ return Conversation(system=self.system, roles=self.roles, messages=[[x, y] for x, y in self.messages], offset=self.offset, sep_style=self.sep_style, sep=self.sep, sep2=self.sep2, version=self.version)
290
+
291
+ def dict(self):
292
+ if len(self.get_images()) > 0:
293
+ return {
294
+ "system": self.system,
295
+ "roles": self.roles,
296
+ "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
297
+ "offset": self.offset,
298
+ "sep": self.sep,
299
+ "sep2": self.sep2,
300
+ }
301
+ return {
302
+ "system": self.system,
303
+ "roles": self.roles,
304
+ "messages": self.messages,
305
+ "offset": self.offset,
306
+ "sep": self.sep,
307
+ "sep2": self.sep2,
308
+ }
309
+
310
+
311
+ conv_vicuna_v0 = Conversation(
312
+ system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
313
+ roles=("Human", "Assistant"),
314
+ messages=[
315
+ ["Human", "What are the key differences between renewable and non-renewable energy sources?"],
316
+ [
317
+ "Assistant",
318
+ "Renewable energy sources are those that can be replenished naturally in a relatively "
319
+ "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
320
+ "Non-renewable energy sources, on the other hand, are finite and will eventually be "
321
+ "depleted, such as coal, oil, and natural gas. Here are some key differences between "
322
+ "renewable and non-renewable energy sources:\n"
323
+ "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
324
+ "energy sources are finite and will eventually run out.\n"
325
+ "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
326
+ "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
327
+ "and other negative effects.\n"
328
+ "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
329
+ "have lower operational costs than non-renewable sources.\n"
330
+ "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
331
+ "locations than non-renewable sources.\n"
332
+ "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
333
+ "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
334
+ "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
335
+ "non-renewable sources are not, and their depletion can lead to economic and social instability.\n",
336
+ ],
337
+ ],
338
+ offset=2,
339
+ sep_style=SeparatorStyle.SINGLE,
340
+ sep="###",
341
+ )
342
+
343
+ conv_vicuna_v1 = Conversation(
344
+ system="A chat between a curious user and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions.",
345
+ roles=("USER", "ASSISTANT"),
346
+ version="v1",
347
+ messages=[],
348
+ offset=0,
349
+ sep_style=SeparatorStyle.TWO,
350
+ sep=" ",
351
+ sep2="</s>",
352
+ )
353
+
354
+ conv_llama_2 = Conversation(
355
+ system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
356
+
357
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
358
+ roles=("USER", "ASSISTANT"),
359
+ version="llama_v2",
360
+ messages=[],
361
+ offset=0,
362
+ sep_style=SeparatorStyle.LLAMA_2,
363
+ sep="<s>",
364
+ sep2="</s>",
365
+ )
366
+
367
+ conv_llava_llama_2 = Conversation(
368
+ system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
369
+ roles=("USER", "ASSISTANT"),
370
+ version="llama_v2",
371
+ messages=[],
372
+ offset=0,
373
+ sep_style=SeparatorStyle.LLAMA_2,
374
+ sep="<s>",
375
+ sep2="</s>",
376
+ )
377
+
378
+ # conv_llava_llama_3 = Conversation(
379
+ # system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
380
+ # roles=("user", "assistant"),
381
+ # version="llama_v3",
382
+ # messages=[],
383
+ # offset=0,
384
+ # sep="<|eot_id|>",
385
+ # sep_style=SeparatorStyle.LLAMA_3,
386
+ # tokenizer_id="meta-llama/Meta-Llama-3-8B-Instruct",
387
+ # tokenizer=AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct"),
388
+ # stop_token_ids=[128009],
389
+ # )
390
+
391
+ conv_mistral_instruct = Conversation(
392
+ system="",
393
+ roles=("USER", "ASSISTANT"),
394
+ version="llama_v2",
395
+ messages=[],
396
+ offset=0,
397
+ sep_style=SeparatorStyle.LLAMA_2,
398
+ sep="",
399
+ sep2="</s>",
400
+ )
401
+
402
+ conv_llava_llama_2_simple = Conversation(
403
+ system="Answer the questions about the visual content that the user provides.",
404
+ roles=("USER", "ASSISTANT"),
405
+ version="llama_v2",
406
+ messages=[],
407
+ offset=0,
408
+ sep_style=SeparatorStyle.LLAMA_2,
409
+ sep="<s>",
410
+ sep2="</s>",
411
+ )
412
+
413
+ conv_llava_llama_2_mmtag = Conversation(
414
+ system="Answer the questions about the visual content that the user provides." "The visual content will be provided with the following format: <Image>visual content</Image>.",
415
+ roles=("USER", "ASSISTANT"),
416
+ version="llama_v2_mmtag",
417
+ messages=[],
418
+ offset=0,
419
+ sep_style=SeparatorStyle.LLAMA_2,
420
+ sep="<s>",
421
+ sep2="</s>",
422
+ )
423
+
424
+ conv_mpt = Conversation(
425
+ system="""<|im_start|>system
426
+ A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
427
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
428
+ version="mpt",
429
+ messages=[],
430
+ offset=0,
431
+ sep_style=SeparatorStyle.MPT,
432
+ sep="<|im_end|>",
433
+ )
434
+
435
+ conv_qwen = Conversation(
436
+ system="""<|im_start|>system
437
+ You are a helpful assistant.""",
438
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
439
+ version="qwen",
440
+ messages=[],
441
+ offset=0,
442
+ sep_style=SeparatorStyle.CHATML,
443
+ sep="<|im_end|>",
444
+ )
445
+
446
+
447
+
448
+ conv_internlm_2 = Conversation(
449
+ system="""<|im_start|>system
450
+ You are a helpful assistant.""",
451
+ roles=("<|im_start|>user", "<|im_start|>assistant"),
452
+ version="internlm_2",
453
+ messages=[],
454
+ offset=0,
455
+ sep_style=SeparatorStyle.CHATML,
456
+ sep="<|im_end|>",
457
+ )
458
+
459
+ conv_gemma_instruct = Conversation(system="", roles=("<start_of_turn>user\n", "<start_of_turn>model\n"), version="gemma", messages=[], offset=0, sep_style=SeparatorStyle.GEMMA, sep="<end_of_turn>\n")
460
+
461
+ conv_llava_plain = Conversation(
462
+ system="",
463
+ roles=("", ""),
464
+ messages=[],
465
+ offset=0,
466
+ sep_style=SeparatorStyle.PLAIN,
467
+ sep="\n",
468
+ )
469
+
470
+ conv_llava_v0 = Conversation(
471
+ system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
472
+ roles=("Human", "Assistant"),
473
+ messages=[],
474
+ offset=0,
475
+ sep_style=SeparatorStyle.SINGLE,
476
+ sep="###",
477
+ )
478
+
479
+ conv_llava_v0_mmtag = Conversation(
480
+ system="A chat between a curious user and an artificial intelligence assistant. "
481
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
482
+ "The visual content will be provided with the following format: <Image>visual content</Image>.",
483
+ roles=("Human", "Assistant"),
484
+ messages=[],
485
+ offset=0,
486
+ sep_style=SeparatorStyle.SINGLE,
487
+ sep="###",
488
+ version="v0_mmtag",
489
+ )
490
+
491
+ conv_llava_v1 = Conversation(
492
+ system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
493
+ roles=("USER", "ASSISTANT"),
494
+ version="v1",
495
+ messages=[],
496
+ offset=0,
497
+ sep_style=SeparatorStyle.TWO,
498
+ sep=" ",
499
+ sep2="</s>",
500
+ )
501
+
502
+ conv_llava_v1_mmtag = Conversation(
503
+ system="A chat between a curious user and an artificial intelligence assistant. "
504
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
505
+ "The visual content will be provided with the following format: <Image>visual content</Image>.",
506
+ roles=("USER", "ASSISTANT"),
507
+ messages=[],
508
+ offset=0,
509
+ sep_style=SeparatorStyle.TWO,
510
+ sep=" ",
511
+ sep2="</s>",
512
+ version="v1_mmtag",
513
+ )
514
+
515
+ conv_mistral_orca = Conversation(
516
+ system="""<|im_start|>system
517
+ You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!""",
518
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
519
+ version="mpt",
520
+ messages=[],
521
+ offset=0,
522
+ sep_style=SeparatorStyle.MPT,
523
+ sep="<|im_end|>",
524
+ )
525
+
526
+ conv_mistral_zephyr = Conversation(
527
+ system="""<|system|>
528
+ You are a helpful AI assistant.""",
529
+ roles=("<|user|>\n", "<|assistant|>\n"),
530
+ version="mpt",
531
+ messages=[],
532
+ offset=0,
533
+ sep_style=SeparatorStyle.MPT,
534
+ sep="</s>",
535
+ )
536
+
537
+ conv_mistral_direct = Conversation(
538
+ system="""<|im_start|>system
539
+ Answer the questions.""",
540
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
541
+ version="mpt",
542
+ messages=[],
543
+ offset=0,
544
+ sep_style=SeparatorStyle.MPT,
545
+ sep="<|im_end|>",
546
+ )
547
+
548
+ conv_chatml_direct = Conversation(
549
+ system="""<|im_start|>system
550
+ Answer the questions.""",
551
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
552
+ version="mpt",
553
+ messages=[],
554
+ offset=0,
555
+ sep_style=SeparatorStyle.MPT,
556
+ sep="<|im_end|>",
557
+ )
558
+
559
+ default_conversation = conv_vicuna_v0
560
+ conv_templates = {
561
+ "default": conv_vicuna_v0,
562
+ "v0": conv_vicuna_v0,
563
+ "v1": conv_vicuna_v1,
564
+ "vicuna_v1": conv_vicuna_v1,
565
+ "llama_2": conv_llama_2,
566
+ "mistral_instruct": conv_mistral_instruct,
567
+ "mistral_orca": conv_mistral_orca,
568
+ "mistral_zephyr": conv_mistral_zephyr,
569
+ "mistral_direct": conv_mistral_direct,
570
+ "plain": conv_llava_plain,
571
+ "v0_plain": conv_llava_plain,
572
+ "chatml_direct": conv_chatml_direct,
573
+ "llava_v0": conv_llava_v0,
574
+ "llava_v0_mmtag": conv_llava_v0_mmtag,
575
+ "llava_v1": conv_llava_v1,
576
+ "llava_v1_mmtag": conv_llava_v1_mmtag,
577
+ "llava_llama_2": conv_llava_llama_2,
578
+ # "llava_llama_3": conv_llava_llama_3,
579
+ "llava_llama_2_simple": conv_llava_llama_2_simple,
580
+ "llava_llama_2_mmtag": conv_llava_llama_2_mmtag,
581
+ "llava_mistral_instruct": conv_mistral_instruct,
582
+ "mpt": conv_mpt,
583
+ "qwen_1_5": conv_qwen,
584
+ "qwen_2": conv_qwen,
585
+ "internlm_2": conv_internlm_2,
586
+ "gemma_instruct": conv_gemma_instruct,
587
+ }
588
+
589
+
590
+ if __name__ == "__main__":
591
+ print(default_conversation.get_prompt())
592
+ print(default_conversation)
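
Typical usage is to copy one of the registered templates, append the turns, and render the prompt. A short sketch using the definitions above (the question text is illustrative):

    conv = conv_templates["qwen_2"].copy()
    conv.append_message(conv.roles[0], "<image>\nWhat is happening in this video?")
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
    # CHATML rendering, ending with "<|im_start|>assistant\n" for generation
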
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
mm_projector_builder.py ADDED
@@ -0,0 +1,165 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from typing import Callable, Tuple
4
+
5
+
6
+ def bipartite_soft_matching(
7
+ metric: torch.Tensor,
8
+ r: int,
9
+ ) -> Tuple[Callable, Callable]:
10
+ """
11
+ Applies ToMe with a balanced matching set (50%, 50%).
12
+
13
+ Input size is [batch, tokens, channels].
14
+ r indicates the number of tokens to remove (max 50% of tokens).
15
+ """
16
+ protected = 0
17
+
18
+ t = metric.shape[1]
19
+ r = min(r, (t - protected) // 2)
20
+
21
+ assert r > 0, r
22
+
23
+ with torch.no_grad():
24
+ metric = metric / metric.norm(dim=-1, keepdim=True)
25
+ a, b = metric[..., ::2, :], metric[..., 1::2, :]
26
+ scores = a @ b.transpose(-1, -2)
27
+
28
+ node_max, node_idx = scores.max(dim=-1)
29
+ edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]
30
+
31
+ unm_idx = edge_idx[..., r:, :] # Unmerged Tokens
32
+ src_idx = edge_idx[..., :r, :] # Merged Tokens
33
+ dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx)
34
+
35
+ def merge(x: torch.Tensor, mode="mean") -> torch.Tensor:
36
+ src, dst = x[..., ::2, :], x[..., 1::2, :]
37
+ n, t1, c = src.shape
38
+ unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c))
39
+ src = src.gather(dim=-2, index=src_idx.expand(n, r, c))
40
+ dst = dst.scatter_add(-2, dst_idx.expand(n, r, c), src) # , reduce=mode)
41
+
42
+ return torch.cat([unm, dst], dim=1)
43
+
44
+ def unmerge(x: torch.Tensor) -> torch.Tensor:
45
+ unm_len = unm_idx.shape[1]
46
+ unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]
47
+ n, _, c = unm.shape
48
+
49
+ src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c))
50
+
51
+ out = torch.zeros(n, metric.shape[1], c, device=x.device, dtype=x.dtype)
52
+
53
+ out[..., 1::2, :] = dst
54
+ out.scatter_(dim=-2, index=(2 * unm_idx).expand(n, unm_len, c), src=unm)
55
+ out.scatter_(dim=-2, index=(2 * src_idx).expand(n, r, c), src=src)
56
+
57
+ return out
58
+
59
+ return merge, unmerge
60
+
61
+
62
+ def merge_wavg(
63
+ merge: Callable, x: torch.Tensor, size: torch.Tensor = None
64
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
65
+ """
66
+ Applies the merge function by taking a weighted average based on token size.
67
+ Returns the merged tensor and the new token sizes.
68
+ """
69
+ if size is None:
70
+ size = torch.ones_like(x[..., 0, None])
71
+
72
+ x = merge(x * size, mode="sum")
73
+ size = merge(size, mode="sum")
74
+
75
+ x = x / size
76
+ return x, size
77
+
78
+
79
+
80
+
81
+ class ToMe16_mlp_hd64(nn.Module):
82
+ def __init__(self, config, vision_cfg):
83
+ super().__init__()
84
+ self._config = config
85
+ self.mm_hidden_size = config.mm_hidden_size
86
+ self.hw = vision_cfg.image_size // vision_cfg.patch_size
87
+ self.num_attention_heads = vision_cfg.num_attention_heads
88
+ self.mlp = nn.Sequential(nn.Linear(config.mm_hidden_size, config.hidden_size),
89
+ nn.GELU(),
90
+ nn.Linear(config.hidden_size, config.hidden_size))
91
+ self.max_pos_hw = self.hw
92
+ self.max_pos_num_frames = config.mm_pos_num_frames
93
+ self.num_image_patches_per_side = 8
94
+ self.num_frame_patches_per_side = 4
95
+
96
+ def merge_tokens(self, x, target_num_token):
97
+ r"""
98
+ x = torch.randn(10, 2560, c)
99
+ x = merge_tokens(x, r_merge_list=[1280])
100
+ """
101
+ size = None
102
+ b, p, c = x.shape
103
+ tmp_p = p
104
+ r_merge_list = []
105
+ assert tmp_p > target_num_token, f"{tmp_p} should greater than {target_num_token}"
106
+ while tmp_p != target_num_token:
107
+ if tmp_p - target_num_token <= (tmp_p // 2):
108
+ r_merge_list.append(tmp_p - target_num_token)
109
+ break
110
+ else:
111
+ r_merge_list.append(tmp_p // 2)
112
+ tmp_p = tmp_p - (tmp_p // 2)
113
+
114
+
115
+ head = self.num_attention_heads
116
+
117
+ dim = c // head
118
+ for r in r_merge_list:
119
+ metric = x.reshape(b, p, head, dim).mean(2) # [b, p, c//head]
120
+ merge, _ = bipartite_soft_matching(
121
+ metric,
122
+ r
123
+ )
124
+ x, size = merge_wavg(merge, x, size)
125
+ _, p, _ = x.shape
126
+
127
+ return x
128
+
129
+
130
+
131
+ def forward(self, x, compress=False, local_num_frames=-1):  # 64 tokens per single frame
132
+ height = width = self.hw
133
+ assert height * width == x.shape[1]
134
+
135
+ if local_num_frames != -1 and local_num_frames != 1:
136
+ assert compress is True
137
+ if compress:
138
+ if local_num_frames != -1:
139
+ num_frames = local_num_frames
140
+ x = x.reshape(x.shape[0] // local_num_frames, -1, x.shape[-1])
141
+ else:
142
+ num_frames = x.shape[0]
143
+ x = x.reshape(1, -1, x.shape[-1])
144
+ num_tome_tokens = 16 * num_frames
145
+ else:
146
+ num_tome_tokens = 64
147
+
148
+ x = self.merge_tokens(x, target_num_token=num_tome_tokens)
149
+ x = self.mlp(x)
150
+ return x
151
+
152
+ @property
153
+ def config(self):
154
+ return {"mm_projector_type": "tome16_mlp_hd64"}
155
+
156
+
157
+
158
+
159
+ def build_vision_projector(config, delay_load=False, **kwargs):
160
+ projector_type = getattr(config, "mm_projector_type", "linear")
161
+
162
+ if projector_type == 'tome16_mlp_hd64':
163
+ return ToMe16_mlp_hd64(config, kwargs["vision_cfg"])
164
+
165
+ raise ValueError(f"Unknown projector type: {projector_type}")
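
The projector reduces visual tokens with ToMe-style bipartite soft matching before the two-layer MLP. A standalone sketch of the matching utilities above on random features (shapes are illustrative):

    import torch

    x = torch.randn(2, 256, 64)                         # [batch, tokens, channels]
    merge, unmerge = bipartite_soft_matching(x, r=64)   # merge away 64 of the 256 tokens
    merged, sizes = merge_wavg(merge, x)                 # size-weighted merge
    print(merged.shape)                                  # torch.Size([2, 192, 64])

merge_tokens() repeats this matching until the target count is reached, which is how forward() arrives at 16 tokens per frame in compressed mode and 64 otherwise.
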
mm_utils.py ADDED
@@ -0,0 +1,851 @@
1
+ from PIL import Image
2
+ from io import BytesIO
3
+ import base64
4
+ import math
5
+ import ast
6
+ import re
7
+ import torch
8
+ from transformers import StoppingCriteria
9
+ from .constants import IMAGE_TOKEN_INDEX
10
+ import random
11
+ import os
12
+ import io
13
+ import av
14
+ import cv2
15
+ import imageio
16
+ from decord import VideoReader
17
+ import numpy as np
18
+ from torchvision.transforms.functional import pil_to_tensor
19
+
20
+
21
+ ######################## load video ########################
22
+
23
+ def get_index(num_frames, num_segments):
24
+ seg_size = float(num_frames - 1) / num_segments
25
+ start = int(seg_size / 2)
26
+ offsets = np.array([
27
+ start + int(np.round(seg_size * idx)) for idx in range(num_segments)
28
+ ])
29
+ return offsets
30
+
31
+
32
+ def pts_to_secs(pts: int, time_base: float, start_pts: int) -> float:
33
+ """
34
+ Converts a present time with the given time base and start_pts offset to seconds.
35
+
36
+ Returns:
37
+ time_in_seconds (float): The corresponding time in seconds.
38
+
39
+ https://github.com/facebookresearch/pytorchvideo/blob/main/pytorchvideo/data/utils.py#L54-L64
40
+ """
41
+ if pts == math.inf:
42
+ return math.inf
43
+
44
+ return int(pts - start_pts) * time_base
45
+
46
+
47
+ def get_pyav_video_duration(video_reader):
48
+ video_stream = video_reader.streams.video[0]
49
+ video_duration = pts_to_secs(
50
+ video_stream.duration,
51
+ video_stream.time_base,
52
+ video_stream.start_time
53
+ )
54
+ return float(video_duration)
55
+
56
+
57
+
58
+ def get_frame_indices(num_frames, vlen, sample='middle', fix_start=None, input_fps=1, min_num_frames=1, max_num_frames=-1, local_num_frames=8):
59
+
60
+ if min_num_frames > vlen:
61
+ if sample == 'dynamic_fps1':
62
+ min_num_frames = (vlen // local_num_frames) * local_num_frames
63
+ else:
64
+ min_num_frames = vlen
65
+
66
+
67
+ if sample == 'dynamic_fps1':
68
+
69
+ duration = float(vlen) / input_fps
70
+ num_segments = int(duration // local_num_frames)
71
+ if num_segments == 0:
72
+ num_frames = local_num_frames
73
+ else:
74
+ num_frames = local_num_frames * num_segments
75
+
76
+ if max_num_frames > 0:
77
+ num_frames = min(num_frames, max_num_frames)
78
+ sample = "middle" # NOTE
79
+
80
+ # logger.info(f"? is OK (img), duation={duration} frames={num_frames}!!!!")
81
+
82
+ num_frames = max(min_num_frames, num_frames)
83
+
84
+ # print(f"\033[0;31m vlen={vlen}, input_fps={input_fps} num_frames={num_frames} \033[0m")
85
+
86
+ if sample in ["rand", "middle"]: # uniform sampling
87
+ acc_samples = min(num_frames, vlen)
88
+ # split the video into `acc_samples` intervals, and sample from each interval.
89
+ intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
90
+ ranges = []
91
+ for idx, interv in enumerate(intervals[:-1]):
92
+ ranges.append((interv, intervals[idx + 1] - 1))
93
+ if sample == 'rand':
94
+ try:
95
+ frame_indices = [random.choice(range(x[0], x[1])) for x in ranges]
96
+ except:
97
+ frame_indices = np.random.permutation(vlen)[:acc_samples]
98
+ frame_indices.sort()
99
+ frame_indices = list(frame_indices)
100
+ elif fix_start is not None:
101
+ frame_indices = [x[0] + fix_start for x in ranges]
102
+ elif sample == 'middle':
103
+ frame_indices = [(x[0] + x[1]) // 2 for x in ranges]
104
+ else:
105
+ raise NotImplementedError
106
+
107
+ if len(frame_indices) < num_frames: # padded with last frame
108
+ padded_frame_indices = [frame_indices[-1]] * num_frames
109
+ padded_frame_indices[:len(frame_indices)] = frame_indices
110
+ frame_indices = padded_frame_indices
111
+ elif "fps" in sample: # fps0.5, sequentially sample frames at 0.5 fps
112
+ output_fps = float(sample[3:])
113
+ duration = float(vlen) / input_fps
114
+ delta = 1 / output_fps # gap between frames, this is also the clip length each frame represents
115
+ frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta)
116
+ frame_indices = np.around(frame_seconds * input_fps).astype(int)
117
+ frame_indices = [e for e in frame_indices if e < vlen]
118
+ if max_num_frames > 0 and len(frame_indices) > max_num_frames:
119
+ frame_indices = frame_indices[:max_num_frames]
120
+ # frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames)
121
+ else:
122
+ raise ValueError(f"Not support sample type: {sample}")
123
+
124
+
125
+ return frame_indices
126
+
127
+
128
+ def read_frames_av(video_path, num_frames, sample='rand', client=None, fix_start=None, min_num_frames=1, max_num_frames=-1, clip=None, local_num_frames=8):
129
+ if clip is not None:
130
+ raise NotImplementedError("av don't support clip!!!")
131
+ if 's3://' in video_path:
132
+ video_bytes = client.get(video_path)
133
+ byteio = io.BytesIO(video_bytes)
134
+ byteio.seek(0)
135
+ reader = av.open(byteio)
136
+ else:
137
+ byteio = None
138
+ reader = av.open(video_path)
139
+ frames = [f.to_rgb().to_ndarray() for f in reader.decode(video=0)]
140
+ vlen = len(frames)
141
+ duration = get_pyav_video_duration(reader)
142
+ fps = vlen / float(duration)
143
+ frame_indices = get_frame_indices(
144
+ num_frames, vlen, sample=sample, fix_start=fix_start,
145
+ input_fps=fps, min_num_frames=min_num_frames, max_num_frames=max_num_frames, local_num_frames=local_num_frames
146
+ )
147
+ frames = np.stack([frames[idx] for idx in frame_indices]) # (T, H, W, C), torch.uint8
148
+ # frames = frames.permute(0, 3, 1, 2) # (T, C, H, W), torch.uint8
149
+ if byteio != None:
150
+ byteio.close()
151
+
152
+ reader.close()
153
+
154
+ return frames, frame_indices, float(fps), duration
155
+
156
+
157
+ def read_frames_gif(
158
+ video_path, num_frames, sample='rand', fix_start=None,
159
+ min_num_frames=1, max_num_frames=-1, client=None, clip=None, local_num_frames=8
160
+ ):
161
+ if clip is not None:
162
+ raise NotImplementedError("Gif don't support clip!!!")
163
+ if 's3://' in video_path:
164
+ video_bytes = client.get(video_path)
165
+ byteio = io.BytesIO(video_bytes)
166
+ gif = imageio.get_reader(byteio)
167
+ else:
168
+ byteio = None
169
+ gif = imageio.get_reader(video_path)
170
+ vlen = len(gif)
171
+ fps = 1.
172
+ duration = vlen / fps
173
+ frame_indices = get_frame_indices(
174
+ num_frames, vlen, sample=sample, fix_start=fix_start,
175
+ min_num_frames=min_num_frames,
176
+ max_num_frames=max_num_frames, local_num_frames=local_num_frames,
177
+ input_fps=fps
178
+ )
179
+ frames = []
180
+
181
+ min_h = min_w = 100000
182
+ hw_set = set()
183
+ for index, frame in enumerate(gif):
184
+ # for index in frame_idxs:
185
+ if index in frame_indices:
186
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
187
+ frame = frame.astype(np.uint8)
188
+ # # (H x W x C) to (C x H x W)
189
+ # frame = frame.permute(2, 0, 1)
190
+ frames.append(frame)
191
+ hw_set.add(frame.shape)
192
+ if frame.shape[0] < min_h:
193
+ min_h = frame.shape[0]
194
+ if frame.shape[1] < min_w:
195
+ min_w = frame.shape[1]
196
+ # print(hw_set, min_h, min_w)
197
+ if len(hw_set) > 1:
198
+ frames = [i[:min_h, :min_w] for i in frames]
199
+
200
+ frames = np.stack(frames) # .float() / 255
201
+
202
+ if byteio != None:
203
+ byteio.close()
204
+
205
+ return frames, frame_indices, float(fps), duration # for tgif
206
+
207
+
208
+
209
+ def read_frames_decord(
210
+ video_path, num_frames, sample='rand', fix_start=None, min_num_frames=1,
211
+ max_num_frames=-1, client=None, clip=None, local_num_frames=8
212
+ ):
213
+
214
+ if video_path.endswith('.avi'):
215
+ return read_frames_av(video_path=video_path, num_frames=num_frames, sample=sample,
216
+ fix_start=fix_start, min_num_frames=min_num_frames, max_num_frames=max_num_frames,
217
+ client=client, clip=clip, local_num_frames=local_num_frames)
218
+ if 's3://' in video_path:
219
+ video_bytes = client.get(video_path)
220
+ if video_bytes is None or len(video_bytes) == 0:
221
+ raise ValueError(f"Can't read byte from {video_path}!")
222
+ byteio = io.BytesIO(video_bytes)
223
+ video_reader = VideoReader(byteio, num_threads=1)
224
+ else:
225
+ byteio = None
226
+ video_reader = VideoReader(video_path, num_threads=1)
227
+ vlen = len(video_reader)
228
+ fps = video_reader.get_avg_fps()
229
+ duration = vlen / float(fps)
230
+
231
+
232
+ if clip:
233
+ start, end = clip
234
+ start = max(0, start)
235
+ end = min(duration - 0.1, end)
236
+ duration = end - start
237
+ vlen = int(duration * fps)
238
+ start_index = int(start * fps)
239
+
240
+ frame_indices = get_frame_indices(
241
+ num_frames, vlen, sample=sample, fix_start=fix_start,
242
+ input_fps=fps, min_num_frames=min_num_frames, max_num_frames=max_num_frames, local_num_frames=local_num_frames
243
+ )
244
+ if clip:
245
+ frame_indices = [f + start_index for f in frame_indices]
246
+
247
+ # print(fps, frame_indices)
248
+ frames = video_reader.get_batch(frame_indices).asnumpy() # (T, H, W, C), torch.uint8
249
+ # https://github.com/dmlc/decord/issues/208
250
+ video_reader.seek(0)
251
+
252
+ if byteio != None:
253
+ byteio.close()
254
+ # frames = frames.permute(0, 3, 1, 2) # (T, C, H, W), torch.uint8
255
+ return frames, frame_indices, float(fps), duration
256
+
257
+
258
+
259
+ def read_frames_img(
260
+ video_path, num_frames, sample='rand', fix_start=None, min_num_frames=1,
261
+ max_num_frames=-1, client=None, clip=None, local_num_frames=8
262
+ ):
263
+ def extract_frame_number(filename):
264
+ # Extract the numeric part from the filename using regular expressions
265
+ if filename.endswith('.jpg'):
266
+ match = re.search(r'_(\d+).jpg$', filename)
267
+ elif filename.endswith('.jpeg'):
268
+ match = re.search(r'_(\d+).jpeg$', filename)
269
+ elif filename.endswith('.png'):
270
+ match = re.search(r'_(\d+).png$', filename)
271
+ else:
272
+ raise NotImplementedError(f"Wrong filename: {filename}")
273
+
274
+ return int(match.group(1)) if match else -1
275
+
276
+
277
+ def sort_frames(frame_paths):
278
+ # Extract filenames from each path and sort by their numeric part
279
+ return sorted(frame_paths, key=lambda x: extract_frame_number(os.path.basename(x)))
280
+
281
+ # img_list=[]
282
+
283
+ if "s3://" in video_path:
284
+ img_list = sort_frames(client.list(video_path))
285
+ else:
286
+ img_list = sort_frames(list(os.listdir(video_path)))
287
+
288
+
289
+ if 'tvqa' in video_path.lower():
290
+ fps = 3.0
291
+ else:
292
+ fps = 1.0
293
+
294
+ if clip is not None:
295
+ start = float(clip[0])
296
+ end = float(clip[1])
297
+ start = max(0, start)
298
+ end = min(len(img_list) / fps, end)
299
+ vlen = (end - start) * fps
300
+ else:
301
+ vlen = len(img_list)
302
+
303
+ duration = vlen / fps
304
+
305
+ if min_num_frames > vlen:
306
+ if sample == 'dynamic_fps1':
307
+ min_num_frames = (vlen // local_num_frames) * local_num_frames
308
+ else:
309
+ min_num_frames = vlen
310
+
311
+ if sample == 'dynamic_fps1':
312
+ num_segments = int(duration // local_num_frames)
313
+ if num_segments == 0:
314
+ num_frames = local_num_frames
315
+ else:
316
+ num_frames = local_num_frames * num_segments
317
+ num_frames = min(num_frames, max_num_frames)
318
+ num_frames = max(min_num_frames, num_frames)
319
+
320
+ num_frames = int(num_frames)
321
+ if clip is not None:
322
+ def _get_index_by_time(start_sec, end_sec, num_segments=8, fps=1., max_frame=9999):
323
+ start_idx = max(1, round(start_sec * fps))
324
+ end_idx = min(round(end_sec * fps), max_frame)
325
+ seg_size = float(end_idx - start_idx) / (num_segments - 1)
326
+ offsets = np.array([start_idx + int(np.round(seg_size * idx)) for idx in range(num_segments)])
327
+ return offsets
328
+
329
+ frame_indices = _get_index_by_time(float(clip[0]), float(clip[1]), num_segments=num_frames, fps=fps, max_frame=len(img_list)-1)
330
+ else:
331
+ frame_indices = get_frame_indices(
332
+ num_frames, vlen, sample=sample, fix_start=fix_start,
333
+ min_num_frames=min_num_frames,
334
+ max_num_frames=max_num_frames, local_num_frames=local_num_frames
335
+ )
336
+
337
+ imgs = []
338
+ for idx in frame_indices:
339
+ frame_fname = os.path.join(video_path, img_list[idx])
340
+ if "s3://" in video_path:
341
+ img_bytes = client.get(frame_fname)
342
+ else:
343
+ with open(frame_fname, 'rb') as f:
344
+ img_bytes = f.read()
345
+ img_np = np.frombuffer(img_bytes, np.uint8)
346
+ img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
347
+ cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
348
+ imgs.append(img)
349
+
350
+ frames = np.array(imgs, dtype=np.uint8)
351
+
352
+
353
+ return frames, frame_indices, fps, duration
354
+
355
+
356
+
357
+ VIDEO_READER_FUNCS = {
358
+ 'av': read_frames_av,
359
+ 'decord': read_frames_decord,
360
+ 'gif': read_frames_gif,
361
+ 'img': read_frames_img,
362
+ 'frame': read_frames_img
363
+ }
364
+
365
+
366
+
367
+ def load_video(video_path, max_num_frames=512, media_dict=None): #, media_dict):
368
+
369
+ if media_dict is None:
370
+ media_dict = {'video_read_type': 'decord'}
371
+
372
+ if type(video_path) != str:
373
+ assert len(video_path) == 1, video_path
374
+ video_path = video_path[0]
375
+
376
+ if 'start' in media_dict:
377
+ clip = [media_dict['start'], media_dict['end']]
378
+ else:
379
+ clip = None
380
+
381
+ client = None
382
+
383
+ frames, frame_indices, fps, duration = VIDEO_READER_FUNCS[media_dict['video_read_type']](video_path=video_path, num_frames=max_num_frames, sample='dynamic_fps1', fix_start=None, min_num_frames=64, max_num_frames=max_num_frames, client=client, clip=clip, local_num_frames=8)
384
+
385
+ sec = [str(round(f / fps, 1)) for f in frame_indices]
386
+
387
+ msg = f"\nThe video lasts for {duration:.2f} seconds, and {len(sec)} frames are uniformly sampled from it. "
388
+
389
+ return frames, msg
390
+
391
+
392
+ ######################## load video ########################
393
+
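
For reference, a small sketch of how the uniform "middle" sampling above behaves; each requested frame is taken from the midpoint of one of the equal intervals (numbers are illustrative):

    # e.g. a 300-frame clip at 30 fps, sampled down to 8 frames
    idx = get_frame_indices(num_frames=8, vlen=300, sample="middle", input_fps=30)
    print(idx)  # [18, 55, 93, 130, 168, 205, 243, 280]
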
394
+
395
+ def resize_and_center_crop(image, shortest_edge_length):
396
+ # Calculate new dimensions and resize
397
+ aspect_ratio = float(image.width) / float(image.height)
398
+ if aspect_ratio > 1:
399
+ new_width = int(shortest_edge_length * aspect_ratio)
400
+ new_height = shortest_edge_length
401
+ else:
402
+ new_width = shortest_edge_length
403
+ new_height = int(shortest_edge_length / aspect_ratio)
404
+ resized_image = image.resize((new_width, new_height), Image.ANTIALIAS)
405
+
406
+ # Calculate the position and perform the center crop
407
+ left = (new_width - shortest_edge_length) / 2
408
+ top = (new_height - shortest_edge_length) / 2
409
+ right = (new_width + shortest_edge_length) / 2
410
+ bottom = (new_height + shortest_edge_length) / 2
411
+ cropped_image = resized_image.crop((left, top, right, bottom))
412
+
413
+ return cropped_image
414
+
415
+
416
+ def auto_pad_images(image, grid_params):
417
+ assert isinstance(image, Image.Image), "Input should be a Pillow Image"
418
+ assert len(grid_params) > 0, "Grid parameters should not be empty"
419
+
420
+ # Step 1: Calculate and find the closest aspect ratio
421
+ input_width, input_height = image.size
422
+ input_aspect_ratio = input_width / input_height
423
+ candidate_resolutions = [(w / h, w, h) for w in grid_params for h in grid_params]
424
+ closest_aspect_ratio = min(candidate_resolutions, key=lambda x: abs(input_aspect_ratio - x[0]))
425
+
426
+ candidate_resolutions = [(x[1], x[2]) for x in candidate_resolutions if abs(x[0] - closest_aspect_ratio[0]) < 1e-3]
427
+
428
+ target_resolution = min(candidate_resolutions, key=lambda res: abs(max(input_width, input_height) / max(res) - 1))
429
+
430
+ resize_width, resize_height = target_resolution
431
+ if input_width > input_height:
432
+ resize_height = int(resize_width / input_aspect_ratio)
433
+ else:
434
+ resize_width = int(resize_height * input_aspect_ratio)
435
+ resized_image = image.resize((resize_width, resize_height), Image.ANTIALIAS)
436
+
437
+ # Step 5: Pad the resized image if necessary to match the target resolution
438
+ pad_width = target_resolution[0] - resize_width
439
+ pad_height = target_resolution[1] - resize_height
440
+ padded_image = Image.new("RGB", target_resolution, color=(0, 0, 0))
441
+ padded_image.paste(resized_image, (pad_width // 2, pad_height // 2))
442
+
443
+ return padded_image
444
+
445
+
446
+ def extract_patches(image, patch_size, overlap_ratio):
447
+ assert isinstance(image, Image.Image), "Input should be a Pillow Image"
448
+ assert patch_size > 0, "Patch size should be greater than 0"
449
+ assert 0 <= overlap_ratio < 1, "Overlap ratio should be between 0 and 1"
450
+
451
+ W, H = image.size
452
+ patches = []
453
+
454
+ stride = int(patch_size * (1 - overlap_ratio))
455
+
456
+ num_patches_y = (H - patch_size) // stride + 1
457
+ num_patches_x = (W - patch_size) // stride + 1
458
+
459
+ y_start = (H - (num_patches_y - 1) * stride - patch_size) // 2
460
+ x_start = (W - (num_patches_x - 1) * stride - patch_size) // 2
461
+
462
+ for y in range(y_start, y_start + num_patches_y * stride, stride):
463
+ for x in range(x_start, x_start + num_patches_x * stride, stride):
464
+ patch = image.crop((x, y, x + patch_size, y + patch_size))
465
+ patches.append(patch)
466
+
467
+ return patches
468
+
469
+
470
+ def process_highres_image_crop_split(image, data_args, processor=None):
471
+ crop_resolution = data_args.image_crop_resolution
472
+ split_resolution = data_args.image_split_resolution
473
+ if processor is None:
474
+ processor = data_args.image_processor
475
+ image_crop = resize_and_center_crop(image, crop_resolution)
476
+ image_patches = extract_patches(image_crop, patch_size=split_resolution, overlap_ratio=0)
477
+ image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
478
+ return torch.stack(image_patches, dim=0)
479
+
480
+
481
+ def process_highres_image(image, processor, grid_pinpoints):
482
+ grid_params = [int(x) for x in grid_pinpoints.split(",")]
483
+ width_height = max(image.size)
484
+ fit_grid_params = [x for x in grid_params if x >= width_height]
485
+ if len(fit_grid_params) == 0:
486
+ select_size = max(grid_params)
487
+ else:
488
+ select_size = min(fit_grid_params)
489
+ # FIXME: always select the 448
490
+ select_size = max(grid_params)
491
+ image_padded = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))
492
+
493
+ # FIXME: this seems to be a bug that it always resizes instead of padding
494
+ image_original_resize = image.resize((processor.size["shortest_edge"], processor.size["shortest_edge"]))
495
+ image_padded = image_padded.resize((select_size, select_size))
496
+ image_patches = extract_patches(image_padded, patch_size=processor.size["shortest_edge"], overlap_ratio=0)
497
+ image_patches = [image_original_resize] + image_patches
498
+ image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
499
+ return torch.stack(image_patches, dim=0)
500
+
501
+
502
+ def select_best_resolution(original_size, possible_resolutions, max_resolutions, patch_size):
503
+ """
504
+ Selects the best resolution from a list of possible resolutions based on the original size.
505
+
506
+ Args:
507
+ original_size (tuple): The original size of the image in the format (width, height).
508
+ possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
+ max_resolutions (int or None): Optional cap on the pixel budget; candidates whose area plus one base patch exceeds it are skipped.
+ patch_size (int): Side length of a single vision patch, used when applying the cap.
509
+
510
+ Returns:
511
+ tuple: The best fit resolution in the format (width, height).
512
+ """
513
+ original_width, original_height = original_size
514
+ best_fit = None
515
+ max_effective_resolution = 0
516
+ min_wasted_resolution = float("inf")
517
+
518
+ for width, height in possible_resolutions:
519
+ if max_resolutions is not None and (width * height != patch_size * patch_size):
520
+ if (width * height + patch_size * patch_size) > max_resolutions:  # NOTE: need a global setting for this cap
521
+ continue
522
+ # Calculate the downscaled size to keep the aspect ratio
523
+ scale = min(width / original_width, height / original_height)
524
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
525
+
526
+ # Calculate effective and wasted resolutions
527
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
528
+ wasted_resolution = (width * height) - effective_resolution
529
+
530
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
531
+ max_effective_resolution = effective_resolution
532
+ min_wasted_resolution = wasted_resolution
533
+ best_fit = (width, height)
534
+
535
+ # print(f"original_size={original_size}, possible_resolutions={possible_resolutions}, max_resolutions={max_resolutions}, best_fit={best_fit}")
536
+ assert best_fit is not None, f"Can't find suitable fit in {possible_resolutions} at max:{max_resolutions}"
537
+ return best_fit
538
+
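+ # Illustrative example (hypothetical sizes, not taken from any shipped config): for an
+ # original 1024x768 image and possible_resolutions [(384, 384), (768, 384), (768, 768)]
+ # with max_resolutions=None, the (768, 768) candidate preserves the most original pixels
+ # (downscaled to 768x576, effective 442368 px) and is returned as the best fit.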
539
+
540
+ def resize_and_pad_image(image, target_resolution):
541
+ """
542
+ Resize and pad an image to a target resolution while maintaining aspect ratio.
543
+
544
+ Args:
545
+ image (PIL.Image.Image): The input image.
546
+ target_resolution (tuple): The target resolution (width, height) of the image.
547
+
548
+ Returns:
549
+ PIL.Image.Image: The resized and padded image.
550
+ """
551
+ original_width, original_height = image.size
552
+ target_width, target_height = target_resolution
553
+
554
+ # Determine which dimension (width or height) to fill
555
+ scale_w = target_width / original_width
556
+ scale_h = target_height / original_height
557
+
558
+ if scale_w < scale_h:
559
+ # Width will be filled completely
560
+ new_width = target_width
561
+ new_height = min(math.ceil(original_height * scale_w), target_height)
562
+ else:
563
+ # Height will be filled completely
564
+ new_height = target_height
565
+ new_width = min(math.ceil(original_width * scale_h), target_width)
566
+
567
+ # Resize the image
568
+ resized_image = image.resize((new_width, new_height))
569
+
570
+ # Create a new image with the target size and paste the resized image onto it
571
+ new_image = Image.new("RGB", (target_width, target_height), (0, 0, 0))
572
+ paste_x = (target_width - new_width) // 2
573
+ paste_y = (target_height - new_height) // 2
574
+ new_image.paste(resized_image, (paste_x, paste_y))
575
+
576
+ return new_image
577
+
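+ # Illustrative example (hypothetical sizes): an 800x600 image resized and padded to a
+ # (768, 768) target gives scale_w = 0.96 < scale_h = 1.28, so the width is filled:
+ # new size 768x576, pasted at (0, 96) with 96-pixel black bars above and below.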
578
+
579
+ def divide_to_patches(image, patch_size):
580
+ """
581
+ Divides an image into patches of a specified size.
582
+
583
+ Args:
584
+ image (PIL.Image.Image): The input image.
585
+ patch_size (int): The size of each patch.
586
+
587
+ Returns:
588
+ list: A list of PIL.Image.Image objects representing the patches.
589
+ """
590
+ patches = []
591
+ width, height = image.size
592
+ for i in range(0, height, patch_size):
593
+ for j in range(0, width, patch_size):
594
+ box = (j, i, j + patch_size, i + patch_size)
595
+ patch = image.crop(box)
596
+ patches.append(patch)
597
+
598
+ return patches
599
+
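+ # Illustrative example (hypothetical sizes): a 768x768 image with patch_size=384 yields
+ # four 384x384 patches in row-major order (top-left, top-right, bottom-left, bottom-right).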
600
+
601
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size, max_resolutions=None):
602
+ """
603
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
604
+
605
+ Args:
606
+ image_size (tuple): The size of the input image in the format (width, height).
607
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
608
+ patch_size (int): The size of each image patch.
609
+
610
+ Returns:
611
+ tuple: The shape of the image patch grid in the format (width, height).
612
+ """
613
+ if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
614
+ assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
615
+ # Use regex to extract the range from the input string
616
+ matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
617
+ range_start = tuple(map(int, matches[0]))
618
+ range_end = tuple(map(int, matches[-1]))
619
+ # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
620
+ grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
621
+ # Multiply all elements by patch_size
622
+ grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
623
+ if type(grid_pinpoints) is list:
624
+ possible_resolutions = grid_pinpoints
625
+ else:
626
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
627
+ width, height = select_best_resolution(image_size, possible_resolutions, max_resolutions=max_resolutions, patch_size=patch_size)
628
+
629
+ # print("get width/patch size", width, patch_size, flush=True)
630
+
631
+ return width // patch_size, height // patch_size
632
+
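+ # Illustrative example (hypothetical values): with grid_pinpoints "(1x1),...,(2x2)",
+ # patch_size=384 and a 1024x768 image, the candidate resolutions are
+ # [(384, 384), (384, 768), (768, 384), (768, 768)]; select_best_resolution picks
+ # (768, 768), so the returned grid shape is (2, 2).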
633
+
634
+ def process_anyres_image(image, processor, grid_pinpoints):
635
+ """
636
+ Process an image with variable resolutions.
637
+
638
+ Args:
639
+ image (PIL.Image.Image): The input image to be processed.
640
+ processor: The image processor object.
641
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
642
+
643
+ Returns:
644
+ torch.Tensor: A tensor containing the processed image patches.
645
+ """
646
+ raise NotImplementedError
647
+ # Convert grid_pinpoints from string to list
648
+ if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
649
+ try:
650
+ patch_size = processor.size[0]
651
+ except Exception as e:
652
+ patch_size = processor.size["shortest_edge"]
653
+ assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
654
+ # Use regex to extract the range from the input string
655
+ matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
656
+ range_start = tuple(map(int, matches[0]))
657
+ range_end = tuple(map(int, matches[-1]))
658
+ # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
659
+ grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
660
+ # Multiply all elements by patch_size
661
+ grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
662
+
663
+ if type(grid_pinpoints) is list:
664
+ possible_resolutions = grid_pinpoints
665
+ else:
666
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
667
+ best_resolution = select_best_resolution(image.size, possible_resolutions)
668
+ image_padded = resize_and_pad_image(image, best_resolution)
669
+
670
+ patches = divide_to_patches(image_padded, processor.crop_size["height"])
671
+
672
+ # FIXME: this seems to be a bug that it resizes instead of pad.
673
+ # but to keep it consistent with previous, i will keep it as it is
674
+ # TODO: uncomment below to ablate with the padding
675
+ if isinstance(processor.size, dict):
676
+ shortest_edge = processor.size["shortest_edge"]
677
+ else:
678
+ shortest_edge = min(processor.size)
679
+ image_original_resize = image.resize((shortest_edge, shortest_edge))
680
+ # image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
681
+ # image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
682
+
683
+ image_patches = [image_original_resize] + patches
684
+ image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
685
+
686
+ # print("image.size", image.size, "len(image_patches):", len(image_patches), "patch_size:", image_patches[0].shape)
687
+ return torch.stack(image_patches, dim=0)
688
+
689
+ def process_anyres_image_nopad(image, processor, grid_pinpoints):
690
+ """
691
+ Process an image with variable resolutions.
692
+
693
+ Args:
694
+ image (PIL.Image.Image): The input image to be processed.
695
+ processor: The image processor object.
696
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
697
+
698
+ Returns:
699
+ torch.Tensor: A tensor containing the processed image patches.
700
+ """
701
+ # Convert grid_pinpoints from string to list
702
+ try:
703
+ patch_size = processor.size[0]
704
+ except Exception as e:
705
+ patch_size = processor.size["shortest_edge"]
706
+
707
+ assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
708
+
709
+ if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
710
+
711
+ # Use regex to extract the range from the input string
712
+ matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
713
+ range_start = tuple(map(int, matches[0]))
714
+ range_end = tuple(map(int, matches[-1]))
715
+ # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
716
+ grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
717
+ # Multiply all elements by patch_size
718
+ grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
719
+
720
+ if type(grid_pinpoints) is list:
721
+ possible_resolutions = grid_pinpoints
722
+ else:
723
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
724
+ best_resolution = select_best_resolution(image.size, possible_resolutions, max_resolutions=None, patch_size=patch_size)  # no limit on image resolution for now
725
+ # image_padded = resize_and_pad_image(image, best_resolution)
726
+
727
+ patches = divide_to_patches(image.resize(best_resolution), patch_size)
728
+
729
+ # FIXME: this seems to be a bug that it resizes instead of pad.
730
+ # but to keep it consistent with previous, i will keep it as it is
731
+ # TODO: uncomment below to ablate with the padding
732
+ if isinstance(processor.size, dict):
733
+ shortest_edge = processor.size["shortest_edge"]
734
+ else:
735
+ shortest_edge = min(processor.size)
736
+ image_original_resize = image.resize((shortest_edge, shortest_edge))
737
+ # image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
738
+ # image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
739
+
740
+ image_patches = [image_original_resize] + patches
741
+ image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
742
+
743
+ # raise ValueError(f"image.size: {image.size} len(image_patches): {len(image_patches)}, patch_size:, {image_patches[0].shape}, possible_resolutions:, {possible_resolutions}, best: {best_resolution}")
744
+ return torch.stack(image_patches, dim=0)
745
+
746
+
747
+ def load_image_from_base64(image):
748
+ return Image.open(BytesIO(base64.b64decode(image)))
749
+
750
+
751
+ def expand2square(pil_img, background_color):
752
+ width, height = pil_img.size
753
+ if width == height:
754
+ return pil_img
755
+ elif width > height:
756
+ result = Image.new(pil_img.mode, (width, width), background_color)
757
+ result.paste(pil_img, (0, (width - height) // 2))
758
+ return result
759
+ else:
760
+ result = Image.new(pil_img.mode, (height, height), background_color)
761
+ result.paste(pil_img, ((height - width) // 2, 0))
762
+ return result
763
+
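+ # Illustrative example (hypothetical sizes): a 640x480 image becomes a 640x640 canvas
+ # filled with background_color, with the original pasted at (0, 80) so it stays vertically centered.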
764
+
765
+ def process_images(images, image_processor, model_cfg):
766
+ image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
767
+ new_images = []
768
+ if image_aspect_ratio == "highres":
769
+ raise NotImplementedError
770
+ for image in images:
771
+ image = process_highres_image(image, image_processor, model_cfg.image_grid_pinpoints)
772
+ new_images.append(image)
773
+ elif "anyres" in image_aspect_ratio:
774
+ for image in images:
775
+ if "nopad" in image_aspect_ratio:
776
+ image = process_anyres_image_nopad(image, image_processor, model_cfg.image_grid_pinpoints)
777
+ else:
778
+ image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
779
+ new_images.append(image)
780
+ elif image_aspect_ratio == "crop_split":
781
+ raise NotImplementedError
782
+ for image in images:
783
+ image = process_highres_image_crop_split(image, model_cfg, image_processor)
784
+ new_images.append(image)
785
+ elif image_aspect_ratio == "pad":
786
+ for image in images:
787
+ image = expand2square(image, tuple(int(x * 255) for x in image_processor.image_mean))
788
+ image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
789
+ new_images.append(image)
790
+ else:
791
+ return image_processor.preprocess(images, return_tensors="pt")["pixel_values"]
792
+ if all(x.shape == new_images[0].shape for x in new_images):
793
+ new_images = torch.stack(new_images, dim=0)
794
+ return new_images
795
+
796
+
797
+ def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
798
+ prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
799
+
800
+ def insert_separator(X, sep):
801
+ return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
802
+
803
+ input_ids = []
804
+ offset = 0
805
+ if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
806
+ offset = 1
807
+ input_ids.append(prompt_chunks[0][0])
808
+
809
+ for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
810
+ input_ids.extend(x[offset:])
811
+
812
+ if return_tensors is not None:
813
+ if return_tensors == "pt":
814
+ return torch.tensor(input_ids, dtype=torch.long)
815
+ raise ValueError(f"Unsupported tensor type: {return_tensors}")
816
+ return input_ids
817
+
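+ # Illustrative example (hypothetical prompt): "USER: <image>\nDescribe the video." is split
+ # on "<image>", each chunk is tokenized, and a single IMAGE_TOKEN_INDEX placeholder id is
+ # spliced between the chunks (after the BOS token, if the tokenizer emits one), so the model
+ # can later replace that placeholder with the projected image features.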
818
+
819
+ def get_model_name_from_path(model_path):
820
+ model_path = model_path.strip("/")
821
+ model_paths = model_path.split("/")
822
+ if model_paths[-1].startswith("checkpoint-"):
823
+ return model_paths[-2] + "_" + model_paths[-1]
824
+ else:
825
+ return model_paths[-1]
826
+
827
+
828
+ class KeywordsStoppingCriteria(StoppingCriteria):
829
+ def __init__(self, keywords, tokenizer, input_ids):
830
+ self.keywords = keywords
831
+ self.keyword_ids = []
832
+ for keyword in keywords:
833
+ cur_keyword_ids = tokenizer(keyword).input_ids
834
+ if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
835
+ cur_keyword_ids = cur_keyword_ids[1:]
836
+ self.keyword_ids.append(torch.tensor(cur_keyword_ids))
837
+ self.tokenizer = tokenizer
838
+ self.start_len = input_ids.shape[1]
839
+
840
+ def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
841
+ assert output_ids.shape[0] == 1, "Only batch size 1 is supported (for now)"  # TODO
842
+ offset = min(output_ids.shape[1] - self.start_len, 3)
843
+ self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
844
+ for keyword_id in self.keyword_ids:
845
+ if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():  # compare the whole keyword, token by token
846
+ return True
847
+ outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
848
+ for keyword in self.keywords:
849
+ if keyword in outputs:
850
+ return True
851
+ return False
model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f8ace2bca808732931c85bf2c99ccfb8ec75e04a3b571504b4786b1097b6300
3
+ size 20533852
modeling_qwen2_flash.py ADDED
@@ -0,0 +1,1583 @@
1
+ # coding=utf-8
2
+ # NOTE: written against transformers==4.39.2
3
+ # Borrows some implementations from https://github.com/Cooperx521/PyramidDrop, thanks!
4
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
5
+ #
6
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
7
+ # and OPT implementations in this library. It has been modified from its
8
+ # original forms to accommodate minor architectural differences compared
9
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+ """ PyTorch Qwen2 model."""
23
+ import inspect
24
+ import math
25
+ import warnings
26
+ from typing import List, Optional, Tuple, Union
27
+
28
+ import torch
29
+ import torch.nn.functional as F
30
+ import torch.utils.checkpoint
31
+ from torch import nn
32
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
33
+
34
+ from transformers.activations import ACT2FN
35
+ from transformers.cache_utils import Cache, DynamicCache
36
+ from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
37
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
38
+ from transformers.modeling_utils import PreTrainedModel
39
+ from transformers.utils import (
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ is_flash_attn_2_available,
43
+ is_flash_attn_greater_or_equal_2_10,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
48
+ from .constants import IGNORE_INDEX
49
+
50
+
51
+ if is_flash_attn_2_available():
52
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
53
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
54
+
55
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
56
+
57
+
58
+ logger = logging.get_logger(__name__)
59
+
60
+
61
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
62
+ _CONFIG_FOR_DOC = "Qwen2Config"
63
+
64
+ QWEN2_PRETRAINED_MODEL_ARCHIVE_LIST = [
65
+ "Qwen/Qwen2-7B-beta",
66
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
67
+ ]
68
+
69
+
70
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
71
+ def _get_unpad_data(attention_mask):
72
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
73
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
74
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
75
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
76
+ return (
77
+ indices,
78
+ cu_seqlens,
79
+ max_seqlen_in_batch,
80
+ )
81
+
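+ # Illustrative example (hypothetical mask): attention_mask [[1, 1, 1, 0], [1, 1, 0, 0]]
+ # gives per-sequence lengths [3, 2], flattened non-pad indices [0, 1, 2, 4, 5],
+ # cu_seqlens [0, 3, 5] and max_seqlen_in_batch 3 -- the layout flash_attn_varlen_func expects.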
82
+
83
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
84
+ class Qwen2RMSNorm(nn.Module):
85
+ def __init__(self, hidden_size, eps=1e-6):
86
+ """
87
+ Qwen2RMSNorm is equivalent to T5LayerNorm
88
+ """
89
+ super().__init__()
90
+ self.weight = nn.Parameter(torch.ones(hidden_size))
91
+ self.variance_epsilon = eps
92
+
93
+ def forward(self, hidden_states):
94
+ input_dtype = hidden_states.dtype
95
+ hidden_states = hidden_states.to(torch.float32)
96
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
97
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
98
+ return self.weight * hidden_states.to(input_dtype)
99
+
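+ # In other words, for each token vector x the layer computes
+ # y = weight * x / sqrt(mean(x**2) + eps), with the normalization done in float32
+ # and the result cast back to the input dtype.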
100
+
101
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2
102
+ class Qwen2RotaryEmbedding(nn.Module):
103
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
104
+ super().__init__()
105
+
106
+ self.dim = dim
107
+ self.max_position_embeddings = max_position_embeddings
108
+ self.base = base
109
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
110
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
111
+
112
+ # Build here to make `torch.jit.trace` work.
113
+ self._set_cos_sin_cache(
114
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
115
+ )
116
+
117
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
118
+ self.max_seq_len_cached = seq_len
119
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
120
+
121
+ freqs = torch.outer(t, self.inv_freq)
122
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
123
+ emb = torch.cat((freqs, freqs), dim=-1)
124
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
125
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
126
+
127
+ def forward(self, x, seq_len=None):
128
+ # x: [bs, num_attention_heads, seq_len, head_size]
129
+ if seq_len > self.max_seq_len_cached:
130
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
131
+
132
+ return (
133
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
134
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
135
+ )
136
+
137
+
138
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
139
+ def rotate_half(x):
140
+ """Rotates half the hidden dims of the input."""
141
+ x1 = x[..., : x.shape[-1] // 2]
142
+ x2 = x[..., x.shape[-1] // 2 :]
143
+ return torch.cat((-x2, x1), dim=-1)
144
+
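+ # Illustrative example: for a head_dim-4 vector [1, 2, 3, 4], rotate_half returns
+ # [-3, -4, 1, 2]; combined with the cos/sin caches above this realizes the pairwise
+ # rotation used by rotary position embeddings.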
145
+
146
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
147
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
148
+ """Applies Rotary Position Embedding to the query and key tensors.
149
+
150
+ Args:
151
+ q (`torch.Tensor`): The query tensor.
152
+ k (`torch.Tensor`): The key tensor.
153
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
154
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
155
+ position_ids (`torch.Tensor`):
156
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
157
+ used to pass offsetted position ids when working with a KV-cache.
158
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
159
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
160
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
161
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
162
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
163
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
164
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
165
+ Returns:
166
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
167
+ """
168
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
169
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
170
+ q_embed = (q * cos) + (rotate_half(q) * sin)
171
+ k_embed = (k * cos) + (rotate_half(k) * sin)
172
+ return q_embed, k_embed
173
+
174
+
175
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
176
+ class Qwen2MLP(nn.Module):
177
+ def __init__(self, config):
178
+ super().__init__()
179
+ self.config = config
180
+ self.hidden_size = config.hidden_size
181
+ self.intermediate_size = config.intermediate_size
182
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
183
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
184
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
185
+ self.act_fn = ACT2FN[config.hidden_act]
186
+
187
+ def forward(self, x):
188
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
189
+
190
+
191
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
192
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
193
+ """
194
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
195
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
196
+ """
197
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
198
+ if n_rep == 1:
199
+ return hidden_states
200
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
201
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
202
+
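+ # Illustrative example (hypothetical shapes): a key/value tensor of shape (2, 4, 128, 64)
+ # with n_rep=7 is expanded to (2, 28, 128, 64), duplicating each of the 4 KV heads 7 times
+ # so it lines up with 28 query heads (grouped-query attention).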
203
+
204
+ class Qwen2Attention(nn.Module):
205
+ """
206
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
207
+ and "Generating Long Sequences with Sparse Transformers".
208
+ """
209
+
210
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
211
+ super().__init__()
212
+ self.config = config
213
+ self.layer_idx = layer_idx
214
+ if layer_idx is None:
215
+ logger.warning_once(
216
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
217
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
218
+ "when creating this class."
219
+ )
220
+
221
+ self.hidden_size = config.hidden_size
222
+ self.num_heads = config.num_attention_heads
223
+ self.head_dim = self.hidden_size // self.num_heads
224
+ self.num_key_value_heads = config.num_key_value_heads
225
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
226
+ self.max_position_embeddings = config.max_position_embeddings
227
+ self.rope_theta = config.rope_theta
228
+ self.is_causal = True
229
+ self.attention_dropout = config.attention_dropout
230
+
231
+ if (self.head_dim * self.num_heads) != self.hidden_size:
232
+ raise ValueError(
233
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
234
+ f" and `num_heads`: {self.num_heads})."
235
+ )
236
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
237
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
238
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
239
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
240
+
241
+ self.rotary_emb = Qwen2RotaryEmbedding(
242
+ self.head_dim,
243
+ max_position_embeddings=self.max_position_embeddings,
244
+ base=self.rope_theta,
245
+ )
246
+
247
+ def forward(
248
+ self,
249
+ hidden_states: torch.Tensor,
250
+ attention_mask: Optional[torch.Tensor] = None,
251
+ position_ids: Optional[torch.LongTensor] = None,
252
+ past_key_value: Optional[Cache] = None,
253
+ output_attentions: bool = False,
254
+ use_cache: bool = False,
255
+ **kwargs,
256
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
257
+ if "padding_mask" in kwargs:
258
+ warnings.warn(
259
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
260
+ )
261
+ bsz, q_len, _ = hidden_states.size()
262
+
263
+ query_states = self.q_proj(hidden_states)
264
+ key_states = self.k_proj(hidden_states)
265
+ value_states = self.v_proj(hidden_states)
266
+
267
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
268
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
269
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
270
+
271
+ kv_seq_len = key_states.shape[-2]
272
+ if past_key_value is not None:
273
+ if self.layer_idx is None:
274
+ raise ValueError(
275
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
276
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
277
+ "with a layer index."
278
+ )
279
+ # get_usable_length has been removed in transformers 4.54.0
280
+ if hasattr(past_key_value, "get_usable_length"):
281
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
282
+ else:
283
+ kv_seq_len += past_key_value.get_seq_length(self.layer_idx)
284
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
285
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
286
+
287
+ if past_key_value is not None:
288
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
289
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
290
+
291
+ # repeat k/v heads if n_kv_heads < n_heads
292
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
293
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
294
+
295
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
296
+
297
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
298
+ raise ValueError(
299
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
300
+ f" {attn_weights.size()}"
301
+ )
302
+
303
+ if attention_mask is not None:
304
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
305
+ raise ValueError(
306
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
307
+ )
308
+
309
+ attn_weights = attn_weights + attention_mask
310
+
311
+ # upcast attention to fp32
312
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
313
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
314
+ attn_output = torch.matmul(attn_weights, value_states)
315
+
316
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
317
+ raise ValueError(
318
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
319
+ f" {attn_output.size()}"
320
+ )
321
+
322
+ attn_output = attn_output.transpose(1, 2).contiguous()
323
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
324
+
325
+ attn_output = self.o_proj(attn_output)
326
+
327
+ if not output_attentions:
328
+ attn_weights = None
329
+
330
+ return attn_output, attn_weights, past_key_value
331
+
332
+
333
+ class Qwen2FlashAttention2(Qwen2Attention):
334
+ """
335
+ Qwen2 flash attention module, following the Qwen2 attention module. This module inherits from `Qwen2Attention`
336
+ as the weights of the module stay untouched. The only required change is on the forward pass,
337
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
338
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
339
+ config.max_window_layers layers.
340
+ """
341
+
342
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
343
+ def __init__(self, *args, **kwargs):
344
+ super().__init__(*args, **kwargs)
345
+
346
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
347
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
348
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
349
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
350
+
351
+ def forward(
352
+ self,
353
+ hidden_states: torch.Tensor,
354
+ attention_mask: Optional[torch.Tensor] = None,
355
+ position_ids: Optional[torch.LongTensor] = None,
356
+ past_key_value: Optional[Cache] = None,
357
+ output_attentions: bool = False,
358
+ use_cache: bool = False,
359
+ **kwargs,
360
+ ):
361
+ if "padding_mask" in kwargs:
362
+ warnings.warn(
363
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
364
+ )
365
+
366
+ # overwrite attention_mask with padding_mask
367
+ attention_mask = kwargs.pop("padding_mask")
368
+ bsz, q_len, _ = hidden_states.size()
369
+
370
+ query_states = self.q_proj(hidden_states)
371
+ key_states = self.k_proj(hidden_states)
372
+ value_states = self.v_proj(hidden_states)
373
+
374
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
375
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
376
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
377
+
378
+ kv_seq_len = key_states.shape[-2]
379
+ if past_key_value is not None:
380
+ if self.layer_idx is None:
381
+ raise ValueError(
382
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
383
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
384
+ "with a layer index."
385
+ )
386
+ # get_usable_length has been removed in transformers 4.54.0
387
+ if hasattr(past_key_value, "get_usable_length"):
388
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
389
+ else:
390
+ kv_seq_len += past_key_value.get_seq_length(self.layer_idx)
391
+
392
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
393
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
394
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
395
+
396
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
397
+
398
+ use_sliding_windows = (
399
+ _flash_supports_window_size
400
+ and getattr(self.config, "sliding_window", None) is not None
401
+ and kv_seq_len > self.config.sliding_window
402
+ and self.config.use_sliding_window
403
+ )
404
+
405
+ if not _flash_supports_window_size:
406
+ logger.warning_once(
407
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
408
+ " make sure to upgrade flash-attn library."
409
+ )
410
+
411
+ if past_key_value is not None:
412
+ # Activate slicing cache only if the config has a value `sliding_windows` attribute
413
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
414
+ if (
415
+ getattr(self.config, "sliding_window", None) is not None
416
+ and kv_seq_len > self.config.sliding_window
417
+ and cache_has_contents
418
+ ):
419
+ slicing_tokens = 1 - self.config.sliding_window
420
+
421
+ past_key = past_key_value[self.layer_idx][0]
422
+ past_value = past_key_value[self.layer_idx][1]
423
+
424
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
425
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
426
+
427
+ if past_key.shape[-2] != self.config.sliding_window - 1:
428
+ raise ValueError(
429
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
430
+ f" {past_key.shape}"
431
+ )
432
+
433
+ if attention_mask is not None:
434
+ attention_mask = attention_mask[:, slicing_tokens:]
435
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
436
+
437
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
438
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
439
+
440
+ # repeat k/v heads if n_kv_heads < n_heads
441
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
442
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
443
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
444
+
445
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
446
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
447
+ # cast them back in float16 just to be sure everything works as expected.
448
+ input_dtype = query_states.dtype
449
+ if input_dtype == torch.float32:
450
+ if torch.is_autocast_enabled():
451
+ target_dtype = torch.get_autocast_gpu_dtype()
452
+ # Handle the case where the model is quantized
453
+ elif hasattr(self.config, "_pre_quantization_dtype"):
454
+ target_dtype = self.config._pre_quantization_dtype
455
+ else:
456
+ target_dtype = self.q_proj.weight.dtype
457
+
458
+ logger.warning_once(
459
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
460
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
461
+ f" {target_dtype}."
462
+ )
463
+
464
+ query_states = query_states.to(target_dtype)
465
+ key_states = key_states.to(target_dtype)
466
+ value_states = value_states.to(target_dtype)
467
+
468
+ # Reshape to the expected shape for Flash Attention
469
+ query_states = query_states.transpose(1, 2)
470
+ key_states = key_states.transpose(1, 2)
471
+ value_states = value_states.transpose(1, 2)
472
+
473
+ attn_output = self._flash_attention_forward(
474
+ query_states,
475
+ key_states,
476
+ value_states,
477
+ attention_mask,
478
+ q_len,
479
+ dropout=dropout_rate,
480
+ use_sliding_windows=use_sliding_windows,
481
+ )
482
+
483
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
484
+ attn_output = self.o_proj(attn_output)
485
+
486
+ if not output_attentions:
487
+ attn_weights = None
488
+
489
+ return attn_output, attn_weights, past_key_value
490
+
491
+ def _flash_attention_forward(
492
+ self,
493
+ query_states,
494
+ key_states,
495
+ value_states,
496
+ attention_mask,
497
+ query_length,
498
+ dropout=0.0,
499
+ softmax_scale=None,
500
+ use_sliding_windows=False,
501
+ ):
502
+ """
503
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
504
+ first unpad the input, then computes the attention scores and pad the final attention scores.
505
+
506
+ Args:
507
+ query_states (`torch.Tensor`):
508
+ Input query states to be passed to Flash Attention API
509
+ key_states (`torch.Tensor`):
510
+ Input key states to be passed to Flash Attention API
511
+ value_states (`torch.Tensor`):
512
+ Input value states to be passed to Flash Attention API
513
+ attention_mask (`torch.Tensor`):
514
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
515
+ position of padding tokens and 1 for the position of non-padding tokens.
516
+ dropout (`float`):
517
+ Attention dropout
518
+ softmax_scale (`float`, *optional*):
519
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
520
+ use_sliding_windows (`bool`, *optional*):
521
+ Whether to activate sliding window attention.
522
+ """
523
+ if not self._flash_attn_uses_top_left_mask:
524
+ causal = self.is_causal
525
+ else:
526
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
527
+ causal = self.is_causal and query_length != 1
528
+
529
+ # Decide whether to use SWA or not by layer index.
530
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
531
+ use_sliding_windows = False
532
+
533
+ # Contains at least one padding token in the sequence
534
+ if attention_mask is not None:
535
+ batch_size = query_states.shape[0]
536
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
537
+ query_states, key_states, value_states, attention_mask, query_length
538
+ )
539
+
540
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
541
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
542
+
543
+ if not use_sliding_windows:
544
+ attn_output_unpad = flash_attn_varlen_func(
545
+ query_states,
546
+ key_states,
547
+ value_states,
548
+ cu_seqlens_q=cu_seqlens_q,
549
+ cu_seqlens_k=cu_seqlens_k,
550
+ max_seqlen_q=max_seqlen_in_batch_q,
551
+ max_seqlen_k=max_seqlen_in_batch_k,
552
+ dropout_p=dropout,
553
+ softmax_scale=softmax_scale,
554
+ causal=causal,
555
+ )
556
+ else:
557
+ attn_output_unpad = flash_attn_varlen_func(
558
+ query_states,
559
+ key_states,
560
+ value_states,
561
+ cu_seqlens_q=cu_seqlens_q,
562
+ cu_seqlens_k=cu_seqlens_k,
563
+ max_seqlen_q=max_seqlen_in_batch_q,
564
+ max_seqlen_k=max_seqlen_in_batch_k,
565
+ dropout_p=dropout,
566
+ softmax_scale=softmax_scale,
567
+ causal=causal,
568
+ window_size=(self.config.sliding_window, self.config.sliding_window),
569
+ )
570
+
571
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
572
+ else:
573
+ if not use_sliding_windows:
574
+ attn_output = flash_attn_func(
575
+ query_states,
576
+ key_states,
577
+ value_states,
578
+ dropout,
579
+ softmax_scale=softmax_scale,
580
+ causal=causal,
581
+ )
582
+ else:
583
+ attn_output = flash_attn_func(
584
+ query_states,
585
+ key_states,
586
+ value_states,
587
+ dropout,
588
+ softmax_scale=softmax_scale,
589
+ causal=causal,
590
+ window_size=(self.config.sliding_window, self.config.sliding_window),
591
+ )
592
+
593
+ return attn_output
594
+
595
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
596
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
597
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
598
+
599
+ # On the first iteration we need to properly re-create the padding mask
600
+ # by slicing it on the proper place
601
+ if kv_seq_len != attention_mask.shape[-1]:
602
+ attention_mask_num_tokens = attention_mask.shape[-1]
603
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
604
+
605
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
606
+
607
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
608
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
609
+
610
+ if query_length == kv_seq_len:
611
+ query_layer = index_first_axis(
612
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
613
+ )
614
+ cu_seqlens_q = cu_seqlens_k
615
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
616
+ indices_q = indices_k
617
+ elif query_length == 1:
618
+ max_seqlen_in_batch_q = 1
619
+ cu_seqlens_q = torch.arange(
620
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
621
+ ) # There is a memcpy here, that is very bad.
622
+ indices_q = cu_seqlens_q[:-1]
623
+ query_layer = query_layer.squeeze(1)
624
+ else:
625
+ # The -q_len: slice assumes left padding.
626
+ attention_mask = attention_mask[:, -query_length:]
627
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
628
+
629
+ return (
630
+ query_layer,
631
+ key_layer,
632
+ value_layer,
633
+ indices_q,
634
+ (cu_seqlens_q, cu_seqlens_k),
635
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
636
+ )
637
+
638
+
639
+ # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2
640
+ class Qwen2SdpaAttention(Qwen2Attention):
641
+ """
642
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
643
+ `Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
644
+ SDPA API.
645
+ """
646
+
647
+ # Adapted from Qwen2Attention.forward
648
+ def forward(
649
+ self,
650
+ hidden_states: torch.Tensor,
651
+ attention_mask: Optional[torch.Tensor] = None,
652
+ position_ids: Optional[torch.LongTensor] = None,
653
+ past_key_value: Optional[Cache] = None,
654
+ output_attentions: bool = False,
655
+ use_cache: bool = False,
656
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
657
+ if output_attentions:
658
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
659
+ logger.warning_once(
660
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
661
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
662
+ )
663
+ return super().forward(
664
+ hidden_states=hidden_states,
665
+ attention_mask=attention_mask,
666
+ position_ids=position_ids,
667
+ past_key_value=past_key_value,
668
+ output_attentions=output_attentions,
669
+ use_cache=use_cache,
670
+ )
671
+
672
+ bsz, q_len, _ = hidden_states.size()
673
+
674
+ query_states = self.q_proj(hidden_states)
675
+ key_states = self.k_proj(hidden_states)
676
+ value_states = self.v_proj(hidden_states)
677
+
678
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
679
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
680
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
681
+
682
+ kv_seq_len = key_states.shape[-2]
683
+ if past_key_value is not None:
684
+ # get_usable_length has been removed in transformers 4.54.0
685
+ if hasattr(past_key_value, "get_usable_length"):
686
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
687
+ else:
688
+ kv_seq_len += past_key_value.get_seq_length(self.layer_idx)
689
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
690
+
691
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
692
+
693
+ if past_key_value is not None:
694
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
695
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
696
+
697
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
698
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
699
+
700
+ if attention_mask is not None:
701
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
702
+ raise ValueError(
703
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
704
+ )
705
+
706
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
707
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
708
+ if query_states.device.type == "cuda" and attention_mask is not None:
709
+ query_states = query_states.contiguous()
710
+ key_states = key_states.contiguous()
711
+ value_states = value_states.contiguous()
712
+
713
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
714
+ query_states,
715
+ key_states,
716
+ value_states,
717
+ attn_mask=attention_mask,
718
+ dropout_p=self.attention_dropout if self.training else 0.0,
719
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
720
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
721
+ )
722
+
723
+ attn_output = attn_output.transpose(1, 2).contiguous()
724
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
725
+
726
+ attn_output = self.o_proj(attn_output)
727
+
728
+ return attn_output, None, past_key_value
729
+
730
+
731
+ QWEN2_ATTENTION_CLASSES = {
732
+ "eager": Qwen2Attention,
733
+ "flash_attention_2": Qwen2FlashAttention2,
734
+ "sdpa": Qwen2SdpaAttention,
735
+ }
736
+
737
+
738
+ class Qwen2DecoderLayer(nn.Module):
739
+ def __init__(self, config: Qwen2Config, layer_idx: int):
740
+ super().__init__()
741
+ self.hidden_size = config.hidden_size
742
+
743
+ if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
744
+ logger.warning_once(
745
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
746
+ "unexpected results may be encountered."
747
+ )
748
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
749
+
750
+ self.mlp = Qwen2MLP(config)
751
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
752
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
753
+
754
+ def forward(
755
+ self,
756
+ hidden_states: torch.Tensor,
757
+ attention_mask: Optional[torch.Tensor] = None,
758
+ position_ids: Optional[torch.LongTensor] = None,
759
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
760
+ output_attentions: Optional[bool] = False,
761
+ use_cache: Optional[bool] = False,
762
+ **kwargs,
763
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
764
+ if "padding_mask" in kwargs:
765
+ warnings.warn(
766
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
767
+ "Please make sure use `attention_mask` instead.`"
768
+ )
769
+ """
770
+ Args:
771
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
772
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
773
+ `(batch, sequence_length)` where padding elements are indicated by 0.
774
+ output_attentions (`bool`, *optional*):
775
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
776
+ returned tensors for more detail.
777
+ use_cache (`bool`, *optional*):
778
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
779
+ (see `past_key_values`).
780
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
781
+ """
782
+
783
+ residual = hidden_states
784
+
785
+ hidden_states = self.input_layernorm(hidden_states)
786
+
787
+ # Self Attention
788
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
789
+ hidden_states=hidden_states,
790
+ attention_mask=attention_mask,
791
+ position_ids=position_ids,
792
+ past_key_value=past_key_value,
793
+ output_attentions=output_attentions,
794
+ use_cache=use_cache,
795
+ )
796
+ hidden_states = residual + hidden_states
797
+
798
+ # Fully Connected
799
+ residual = hidden_states
800
+ hidden_states = self.post_attention_layernorm(hidden_states)
801
+ hidden_states = self.mlp(hidden_states)
802
+ hidden_states = residual + hidden_states
803
+
804
+ outputs = (hidden_states,)
805
+
806
+ if output_attentions:
807
+ outputs += (self_attn_weights,)
808
+
809
+ if use_cache:
810
+ outputs += (present_key_value,)
811
+
812
+ return outputs
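# Illustrative sketch (not taken from this file): the pre-norm residual layout that
# Qwen2DecoderLayer.forward follows above, with the attention and MLP modules
# replaced by arbitrary callables so only the control flow remains.
def pre_norm_block(hidden_states, input_norm, self_attn, post_attn_norm, mlp):
    residual = hidden_states
    hidden_states = residual + self_attn(input_norm(hidden_states))  # self-attention sub-layer
    residual = hidden_states
    hidden_states = residual + mlp(post_attn_norm(hidden_states))    # feed-forward sub-layer
    return hidden_states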
813
+
814
+
815
+ QWEN2_START_DOCSTRING = r"""
816
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
817
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
818
+ etc.)
819
+
820
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
821
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
822
+ and behavior.
823
+
824
+ Parameters:
825
+ config ([`Qwen2Config`]):
826
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
827
+ load the weights associated with the model, only the configuration. Check out the
828
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
829
+ """
830
+
831
+
832
+ @add_start_docstrings(
833
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
834
+ QWEN2_START_DOCSTRING,
835
+ )
836
+ class Qwen2PreTrainedModel(PreTrainedModel):
837
+ config_class = Qwen2Config
838
+ base_model_prefix = "model"
839
+ supports_gradient_checkpointing = True
840
+ _no_split_modules = ["Qwen2DecoderLayer"]
841
+ _skip_keys_device_placement = "past_key_values"
842
+ _supports_flash_attn_2 = True
843
+ _supports_sdpa = True
844
+ _supports_cache_class = True
845
+
846
+ def _init_weights(self, module):
847
+ std = self.config.initializer_range
848
+ if isinstance(module, nn.Linear):
849
+ module.weight.data.normal_(mean=0.0, std=std)
850
+ if module.bias is not None:
851
+ module.bias.data.zero_()
852
+ elif isinstance(module, nn.Embedding):
853
+ module.weight.data.normal_(mean=0.0, std=std)
854
+ if module.padding_idx is not None:
855
+ module.weight.data[module.padding_idx].zero_()
856
+
857
+
858
+ QWEN2_INPUTS_DOCSTRING = r"""
859
+ Args:
860
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
861
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
862
+ it.
863
+
864
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
865
+ [`PreTrainedTokenizer.__call__`] for details.
866
+
867
+ [What are input IDs?](../glossary#input-ids)
868
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
869
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
870
+
871
+ - 1 for tokens that are **not masked**,
872
+ - 0 for tokens that are **masked**.
873
+
874
+ [What are attention masks?](../glossary#attention-mask)
875
+
876
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
877
+ [`PreTrainedTokenizer.__call__`] for details.
878
+
879
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
880
+ `past_key_values`).
881
+
882
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
883
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
884
+ information on the default strategy.
885
+
886
+ - 1 indicates the head is **not masked**,
887
+ - 0 indicates the head is **masked**.
888
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
889
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
890
+ config.n_positions - 1]`.
891
+
892
+ [What are position IDs?](../glossary#position-ids)
893
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
894
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
895
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
896
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
897
+
898
+ Two formats are allowed:
899
+ - a [`~cache_utils.Cache`] instance;
900
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
901
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
902
+ cache format.
903
+
904
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
905
+ legacy cache format will be returned.
906
+
907
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
908
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
909
+ of shape `(batch_size, sequence_length)`.
910
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
911
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
912
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
913
+ model's internal embedding lookup matrix.
914
+ use_cache (`bool`, *optional*):
915
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
916
+ `past_key_values`).
917
+ output_attentions (`bool`, *optional*):
918
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
919
+ tensors for more detail.
920
+ output_hidden_states (`bool`, *optional*):
921
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
922
+ more detail.
923
+ return_dict (`bool`, *optional*):
924
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
925
+ """
926
+
927
+
928
+ @add_start_docstrings(
929
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
930
+ QWEN2_START_DOCSTRING,
931
+ )
932
+ class Qwen2Model_Flash(Qwen2PreTrainedModel):
933
+ """
934
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
935
+
936
+ Args:
937
+ config: Qwen2Config
938
+ """
939
+
940
+ def __init__(self, config: Qwen2Config):
941
+ super().__init__(config)
942
+ self.padding_idx = config.pad_token_id
943
+ self.vocab_size = config.vocab_size
944
+
945
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
946
+ self.layers = nn.ModuleList(
947
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
948
+ )
949
+ self._attn_implementation = config._attn_implementation
950
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
951
+
952
+ self.gradient_checkpointing = False
953
+
954
+ # Initialize weights and apply final processing
955
+ self.post_init()
956
+
957
+ def get_input_embeddings(self):
958
+ return self.embed_tokens
959
+
960
+ def set_input_embeddings(self, value):
961
+ self.embed_tokens = value
962
+
963
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
964
+ def forward(
965
+ self,
966
+ input_ids: torch.LongTensor = None,
967
+ attention_mask: Optional[torch.Tensor] = None,
968
+ position_ids: Optional[torch.LongTensor] = None,
969
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
970
+ inputs_embeds: Optional[torch.FloatTensor] = None,
971
+ use_cache: Optional[bool] = None,
972
+ output_attentions: Optional[bool] = None,
973
+ output_hidden_states: Optional[bool] = None,
974
+ return_dict: Optional[bool] = None,
975
+ labels: Optional[torch.Tensor] = None,
976
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
977
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
978
+ output_hidden_states = (
979
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
980
+ )
981
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
982
+
983
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
984
+
985
+ # retrieve input_ids and inputs_embeds
986
+ if input_ids is not None and inputs_embeds is not None:
987
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
988
+ elif input_ids is not None:
989
+ batch_size, seq_length = input_ids.shape
990
+ elif inputs_embeds is not None:
991
+ batch_size, seq_length, _ = inputs_embeds.shape
992
+ else:
993
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
994
+
995
+ if self.gradient_checkpointing and self.training:
996
+ if use_cache:
997
+ logger.warning_once(
998
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
999
+ )
1000
+ use_cache = False
1001
+
1002
+ past_key_values_length = 0
1003
+
1004
+ if use_cache:
1005
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1006
+ if use_legacy_cache:
1007
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1008
+ # get_usable_length has been removed in transformers 4.54.0
1009
+ if hasattr(past_key_values, "get_usable_length"):
1010
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1011
+ else:
1012
+ past_key_values_length = past_key_values.get_seq_length()
1013
+
1014
+ if position_ids is None:
1015
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1016
+ position_ids = torch.arange(
1017
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1018
+ )
1019
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1020
+ else:
1021
+ position_ids = position_ids.view(-1, seq_length).long()
1022
+
1023
+ if inputs_embeds is None:
1024
+ inputs_embeds = self.embed_tokens(input_ids)
1025
+
1026
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
1027
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1028
+ if is_padding_right:
1029
+ raise ValueError(
1030
+ "You are attempting to perform batched generation with padding_side='right'"
1031
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
1032
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1033
+ )
1034
+
1035
+ if self._attn_implementation == "flash_attention_2":
1036
+ # 2d mask is passed through the layers
1037
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1038
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1039
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1040
+ # the manual implementation that requires a 4D causal mask in all cases.
1041
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1042
+ attention_mask,
1043
+ (batch_size, seq_length),
1044
+ inputs_embeds,
1045
+ past_key_values_length,
1046
+ )
1047
+ else:
1048
+ # 4d mask is passed through the layers
1049
+ attention_mask = _prepare_4d_causal_attention_mask(
1050
+ attention_mask,
1051
+ (batch_size, seq_length),
1052
+ inputs_embeds,
1053
+ past_key_values_length,
1054
+ sliding_window=self.config.sliding_window,
1055
+ )
1056
+
1057
+ hidden_states = inputs_embeds
1058
+
1059
+ # decoder layers
1060
+ all_hidden_states = () if output_hidden_states else None
1061
+ all_self_attns = () if output_attentions else None
1062
+ next_decoder_cache = None
1063
+
1064
+ for layer_idx, decoder_layer in enumerate(self.layers):
1065
+ if output_hidden_states:
1066
+ all_hidden_states += (hidden_states,)
1067
+
1068
+ if self.gradient_checkpointing and self.training:
1069
+ layer_outputs = self._gradient_checkpointing_func(
1070
+ decoder_layer.__call__,
1071
+ hidden_states,
1072
+ attention_mask,
1073
+ position_ids,
1074
+ past_key_values,
1075
+ output_attentions,
1076
+ use_cache,
1077
+ )
1078
+ else:
1079
+ layer_outputs = decoder_layer(
1080
+ hidden_states,
1081
+ attention_mask=attention_mask,
1082
+ position_ids=position_ids,
1083
+ past_key_value=past_key_values,
1084
+ output_attentions=output_attentions,
1085
+ use_cache=use_cache,
1086
+ )
1087
+
1088
+ hidden_states = layer_outputs[0]
1089
+
1090
+ if use_cache:
1091
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1092
+
1093
+ if output_attentions:
1094
+ all_self_attns += (layer_outputs[1],)
1095
+
1096
+ ###### copy from pdrop #########
1097
+ # rank & drop after specific layer
1098
+ # during inference, tokens are only dropped in the prefill stage (never while decoding)
1099
+ rank_layer = layer_idx+1
1100
+ if rank_layer in self.llm_compress_layer_list:
1101
+ if hidden_states.shape[1] != 1: # prefill stage or training
1102
+ stage = self.llm_compress_layer_list.index(rank_layer) # determine current stage
1103
+ (
1104
+ position_ids,
1105
+ attention_mask,
1106
+ hidden_states,
1107
+ labels # update labels and return
1108
+ ) = self.video_level_compress(
1109
+ cur_num = stage,
1110
+ rank_layer = rank_layer,
1111
+ features = hidden_states,
1112
+ position_ids=position_ids,
1113
+ attention_mask=attention_mask,
1114
+ labels = labels
1115
+ )
1116
+
1117
+ # process attention_mask again after updating
1118
+ if self._attn_implementation == "flash_attention_2":
1119
+ # 2d mask is passed through the layers
1120
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1121
+ elif self._attn_implementation == "sdpa" and not output_attentions:
1122
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1123
+ # the manual implementation that requires a 4D causal mask in all cases.
1124
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1125
+ attention_mask,
1126
+ (batch_size, hidden_states.shape[1]),
1127
+ hidden_states,
1128
+ past_key_values_length,
1129
+ )
1130
+ else:
1131
+ # 4d mask is passed through the layers
1132
+ attention_mask = _prepare_4d_causal_attention_mask(
1133
+ attention_mask,
1134
+ (batch_size, hidden_states.shape[1]),
1135
+ hidden_states,
1136
+ past_key_values_length,
1137
+ sliding_window=self.config.sliding_window,
1138
+ )
1139
+
1140
+ else:
1141
+ # update position_ids in the decoding stage during inference
1142
+ stage = self.llm_compress_layer_list.index(rank_layer) # determine current stage
1143
+ cur_visual_length = [int(cur_image_token * self.llm_image_token_ratio_list[stage]) for cur_image_token in self.num_image_token_lens]
1144
+ next_visual_length = [int(cur_image_token * self.llm_image_token_ratio_list[stage + 1]) for cur_image_token in self.num_image_token_lens]
1145
+ new_position_ids = []
1146
+ for idx, cur_position_ids in enumerate(position_ids):
1147
+ cur_position_ids = cur_position_ids - (cur_visual_length[idx] - next_visual_length[idx])
1148
+ new_position_ids.append(cur_position_ids)
1149
+ assert idx == 0, idx
1150
+ position_ids = torch.tensor(new_position_ids, dtype=torch.long).unsqueeze(0)
1151
+
1152
+ #################
1153
+
1154
+ hidden_states = self.norm(hidden_states)
1155
+
1156
+ # add hidden states from the last decoder layer
1157
+ if output_hidden_states:
1158
+ all_hidden_states += (hidden_states,)
1159
+
1160
+ next_cache = None
1161
+ if use_cache:
1162
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1163
+
1164
+ if not return_dict:
1165
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None), labels
1166
+ return BaseModelOutputWithPast(
1167
+ last_hidden_state=hidden_states,
1168
+ past_key_values=next_cache,
1169
+ hidden_states=all_hidden_states,
1170
+ attentions=all_self_attns,
1171
+ ), labels
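# Illustrative sketch (values mirror the getattr defaults used elsewhere in this upload,
# not necessarily any released checkpoint): the per-stage visual-token budget behind the
# compression performed inside the decoder-layer loop above.
def visual_token_schedule(num_image_tokens, ratios=(1.0, 0.5, 0.25, 0.125)):
    # entry 0 is the uncompressed count; entry i is the count kept after the i-th drop stage
    return [int(num_image_tokens * r) for r in ratios]

# e.g. visual_token_schedule(576) -> [576, 288, 144, 72]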
1172
+
1173
+
1174
+ # implementation of pdrop
1175
+ def video_level_compress(
1176
+ self, cur_num, rank_layer, features ,
1177
+ position_ids, attention_mask, labels
1178
+ ):
1179
+
1180
+ if self.llm_compress_type == 'uniform0_attention':
1181
+ if cur_num == 0:
1182
+ llm_compress_type = 'uniform'
1183
+ else:
1184
+ llm_compress_type = 'attention'
1185
+ else:
1186
+ llm_compress_type = self.llm_compress_type
1187
+
1188
+ _labels = labels
1189
+ _position_ids = position_ids
1190
+ _attention_mask = attention_mask
1191
+
1192
+ if position_ids is None:
1193
+ position_ids = torch.arange(0, features.shape[1], dtype=torch.long, device=features.device).unsqueeze(0)
1194
+
1195
+ if getattr(self.config, 'tokenizer_padding_side', 'right') == "right":
1196
+
1197
+ batch_size = features.shape[0]
1198
+ image_tokens = [int(cur_image_token * self.llm_image_token_ratio_list[cur_num]) for cur_image_token in self.num_image_token_lens]
1199
+ keep_length = [int(cur_image_token * self.llm_image_token_ratio_list[cur_num + 1]) for cur_image_token in self.num_image_token_lens]
1200
+
1201
+ features_list = []
1202
+ attention_mask_list = []
1203
+ labels_list = []
1204
+
1205
+ if attention_mask is None:
1206
+ attention_mask = torch.ones((batch_size,features.shape[1]), dtype=torch.bool, device=features.device)
1207
+ else:
1208
+ attention_mask = attention_mask.bool()
1209
+ if labels is None:
1210
+ labels = torch.full((batch_size,features.shape[1]), IGNORE_INDEX, device=features.device)
1211
+
1212
+
1213
+ if 'attention' in llm_compress_type:
1214
+ # obtain query_states and key_states to calculate attention map
1215
+ hidden_states= features.clone().detach()
1216
+
1217
+ self_attn = self.layers[rank_layer].self_attn
1218
+ hidden_states = self.layers[rank_layer].input_layernorm(hidden_states)
1219
+
1220
+ num_heads = self_attn.num_heads
1221
+ num_key_value_heads = self_attn.num_key_value_heads
1222
+ head_dim = self_attn.head_dim
1223
+
1224
+ bsz, q_len, _ = hidden_states.size()
1225
+
1226
+ query_states = self_attn.q_proj(hidden_states)
1227
+ key_states = self_attn.k_proj(hidden_states)
1228
+ value_states = self_attn.v_proj(hidden_states)
1229
+
1230
+ query_states = query_states.view(bsz, q_len, num_heads, head_dim).transpose(1, 2)
1231
+ key_states = key_states.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)
1232
+ value_states = value_states.view(bsz, q_len, num_key_value_heads, head_dim).transpose(1, 2)
1233
+
1234
+ kv_seq_len = key_states.shape[-2]
1235
+ cos, sin = self_attn.rotary_emb(value_states, seq_len=kv_seq_len)
1236
+
1237
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
1238
+ key_states = repeat_kv(key_states, self_attn.num_key_value_groups)
1239
+
1240
+ # attention_mask
1241
+ eager_attention_mask = _prepare_4d_causal_attention_mask(
1242
+ attention_mask, (batch_size, q_len), hidden_states, past_key_values_length=0
1243
+ ).to(device=query_states.device)
1244
+
1245
+ # take valid features
1246
+ features = [cur_features[cur_attention_mask] for cur_features, cur_attention_mask in zip(features, attention_mask)]
1247
+ labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
1248
+ attention_mask = [cur_attention_mask[cur_attention_mask] for cur_attention_mask in attention_mask]
1249
+
1250
+ # rank & drop
1251
+ for i in range(batch_size):
1252
+ image_index = self.first_image_token_position[i]
1253
+ if image_index == -1:
1254
+ cur_input_embeds = features[i]
1255
+ features_list.append(cur_input_embeds)
1256
+ attention_mask_list.append(attention_mask[i])
1257
+ labels_list.append(labels[i])
1258
+ continue
1259
+
1260
+ if 'attention' in llm_compress_type:
1261
+
1262
+ # obtain current states
1263
+ cur_key_states = key_states[i]
1264
+ cur_query_states = query_states[i]
1265
+ cur_eager_attention_mask = eager_attention_mask[i]
1266
+
1267
+ # choose last instruction token as query
1268
+ if self.training:
1269
+ answer_index = torch.where(labels[i] != -100)[0].tolist()
1270
+ index_before_answer = []
1271
+ for index in answer_index:
1272
+ if labels[i][index-1] == -100:
1273
+ index_before_answer.append(index-1)
1274
+ if index_before_answer == []:
1275
+ cur_input_embeds = features[i]
1276
+ features_list.append(cur_input_embeds)
1277
+ attention_mask_list.append(attention_mask[i])
1278
+ labels_list.append(labels[i])
1279
+ continue
1280
+
1281
+ index_before_answer=torch.tensor(index_before_answer,device=labels[0].device)
1282
+ text_query_states = cur_query_states[:,index_before_answer,:]
1283
+ text_eager_attention_mask = cur_eager_attention_mask[:,index_before_answer,:]
1284
+
1285
+ else:
1286
+ prompt_total_len = self.text_prompt_lens[i] + image_tokens[i]
1287
+ text_query_states = cur_query_states[:,prompt_total_len-1,:].unsqueeze(1)
1288
+ text_eager_attention_mask = cur_eager_attention_mask[:,prompt_total_len-1,:].unsqueeze(1)
1289
+
1290
+ # calculate attention map
1291
+ attn_weights = torch.matmul(text_query_states, cur_key_states.transpose(1, 2)) / math.sqrt(head_dim) #(num_head, text_token,seq_len)
1292
+ attn_weights = attn_weights + text_eager_attention_mask
1293
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) #(num_head, text_token,seq_len)
1294
+
1295
+ attention_avg_head = torch.mean(attn_weights, dim=0) # ave across heads
1296
+ attention_avg_head = attention_avg_head[:,image_index:image_index+image_tokens[i]] # select image token as keys
1297
+ attention_avg_text = torch.mean(attention_avg_head, dim=0) # (576)
1298
+
1299
+ if llm_compress_type == 'attention':
1300
+ top_rank_index = attention_avg_text.topk(keep_length[i]).indices
1301
+ else:
1302
+ raise NotImplementedError(llm_compress_type)
1303
+
1304
+ elif llm_compress_type == 'uniform':
1305
+ top_rank_index = torch.linspace(0, image_tokens[i]-1, keep_length[i], dtype=torch.long)
1306
+ else:
1307
+ raise NotImplementedError(llm_compress_type)
1308
+
1309
+ top_rank_index = top_rank_index + image_index
1310
+ top_rank_index= top_rank_index.sort().values
1311
+
1312
+ start_index = image_index + image_tokens[i]
1313
+ new_input_embeds = torch.cat([features[i][ :image_index, :] ,features[i][ top_rank_index, :], features[i][start_index:, :]], dim=0)
1314
+
1315
+ new_labels = torch.cat([labels[i][ :image_index],labels[i][ top_rank_index], labels[i][start_index:]], dim=0)
1316
+ new_attention_mask = torch.cat([attention_mask[i][:image_index], attention_mask[i][top_rank_index], attention_mask[i][start_index:]], dim=0)
1317
+
1318
+ features_list.append(new_input_embeds)
1319
+ attention_mask_list.append(new_attention_mask)
1320
+ labels_list.append(new_labels)
1321
+
1322
+ # Truncate sequences to max length as image embeddings can make the sequence longer
1323
+ tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
1324
+ if tokenizer_model_max_length is not None:
1325
+ new_input_embeds = [x[:tokenizer_model_max_length] for x in features_list]
1326
+ new_attention_mask = [x[:tokenizer_model_max_length] for x in attention_mask_list]
1327
+ new_labels = [x[:tokenizer_model_max_length] for x in labels_list]
1328
+
1329
+ max_len = max(x.shape[0] for x in new_input_embeds)
1330
+
1331
+ # padding the sequences to form batch
1332
+ embeds_padded=[]
1333
+ labels_padded=[]
1334
+ attention_mask_padded=[]
1335
+ position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
1336
+ for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
1337
+ cur_len_emb=cur_new_embed.shape[0]
1338
+ dif=max_len - cur_len_emb # padding to longest seq
1339
+
1340
+ cur_new_embed = torch.cat([cur_new_embed,torch.zeros((dif, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)],dim=0)
1341
+ cur_new_labels = torch.cat([cur_new_labels,torch.full((dif,),IGNORE_INDEX,dtype=cur_new_labels.dtype, device=cur_new_labels.device)],dim=0)
1342
+ cur_attention_mask = new_attention_mask[i]
1343
+ cur_attention_mask = torch.cat([cur_attention_mask,torch.full((dif,),False, dtype=cur_attention_mask.dtype, device=cur_attention_mask.device)],dim=0)
1344
+
1345
+ embeds_padded.append(cur_new_embed)
1346
+ labels_padded.append(cur_new_labels)
1347
+ attention_mask_padded.append(cur_attention_mask)
1348
+
1349
+ cur_len = new_attention_mask[i].sum().item()
1350
+ position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
1351
+
1352
+
1353
+ new_input_embeds = torch.stack(embeds_padded,dim=0)
1354
+ new_input_embeds = new_input_embeds.to(features[0].dtype)
1355
+
1356
+ new_attention_mask = torch.stack(attention_mask_padded,dim=0)
1357
+ new_labels = torch.stack(labels_padded,dim=0)
1358
+
1359
+ if _position_ids is None:
1360
+ position_ids = None
1361
+ if _labels is None:
1362
+ new_labels = None
1363
+
1364
+ if _attention_mask is None:
1365
+ new_attention_mask = None
1366
+ else:
1367
+ new_attention_mask = new_attention_mask.to(dtype=_attention_mask.dtype)
1368
+
1369
+ return position_ids, new_attention_mask, new_input_embeds, new_labels
1370
+
1371
+ else:
1372
+ raise ValueError(f"Unexpected tokenizer_padding_side: {self.config.tokenizer_padding_side}")
1373
+
1374
+
1375
+ class Qwen2ForCausalLM_Flash(Qwen2PreTrainedModel):
1376
+ _tied_weights_keys = ["lm_head.weight"]
1377
+
1378
+ def __init__(self, config):
1379
+ super().__init__(config)
1380
+ self.model = Qwen2Model_Flash(config)
1381
+ self.vocab_size = config.vocab_size
1382
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1383
+
1384
+ # Initialize weights and apply final processing
1385
+ self.post_init()
1386
+
1387
+ def get_input_embeddings(self):
1388
+ return self.model.embed_tokens
1389
+
1390
+ def set_input_embeddings(self, value):
1391
+ self.model.embed_tokens = value
1392
+
1393
+ def get_output_embeddings(self):
1394
+ return self.lm_head
1395
+
1396
+ def set_output_embeddings(self, new_embeddings):
1397
+ self.lm_head = new_embeddings
1398
+
1399
+ def set_decoder(self, decoder):
1400
+ self.model = decoder
1401
+
1402
+ def get_decoder(self):
1403
+ return self.model
1404
+
1405
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1406
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1407
+ def forward(
1408
+ self,
1409
+ input_ids: torch.LongTensor = None,
1410
+ attention_mask: Optional[torch.Tensor] = None,
1411
+ position_ids: Optional[torch.LongTensor] = None,
1412
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1413
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1414
+ labels: Optional[torch.LongTensor] = None,
1415
+ use_cache: Optional[bool] = None,
1416
+ output_attentions: Optional[bool] = None,
1417
+ output_hidden_states: Optional[bool] = None,
1418
+ return_dict: Optional[bool] = None,
1419
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1420
+ r"""
1421
+ Args:
1422
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1423
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1424
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1425
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1426
+
1427
+ Returns:
1428
+
1429
+ Example:
1430
+
1431
+ ```python
1432
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1433
+
1434
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1435
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1436
+
1437
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1438
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1439
+
1440
+ >>> # Generate
1441
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1442
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1443
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1444
+ ```"""
1445
+
1446
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1447
+ output_hidden_states = (
1448
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1449
+ )
1450
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1451
+
1452
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1453
+ outputs, labels = self.model(
1454
+ input_ids=input_ids,
1455
+ attention_mask=attention_mask,
1456
+ position_ids=position_ids,
1457
+ past_key_values=past_key_values,
1458
+ inputs_embeds=inputs_embeds,
1459
+ use_cache=use_cache,
1460
+ output_attentions=output_attentions,
1461
+ output_hidden_states=output_hidden_states,
1462
+ return_dict=return_dict,
1463
+ labels=labels
1464
+ )
1465
+
1466
+ hidden_states = outputs[0]
1467
+ logits = self.lm_head(hidden_states)
1468
+ logits = logits.float()
1469
+
1470
+ loss = None
1471
+ if labels is not None:
1472
+ # Shift so that tokens < n predict n
1473
+ shift_logits = logits[..., :-1, :].contiguous()
1474
+ shift_labels = labels[..., 1:].contiguous()
1475
+ # Flatten the tokens
1476
+ loss_fct = CrossEntropyLoss()
1477
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1478
+ shift_labels = shift_labels.view(-1)
1479
+ # Enable model parallelism
1480
+ shift_labels = shift_labels.to(shift_logits.device)
1481
+ loss = loss_fct(shift_logits, shift_labels)
1482
+
1483
+ if not return_dict:
1484
+ output = (logits,) + outputs[1:]
1485
+ return (loss,) + output if loss is not None else output
1486
+
1487
+ return CausalLMOutputWithPast(
1488
+ loss=loss,
1489
+ logits=logits,
1490
+ past_key_values=outputs.past_key_values,
1491
+ hidden_states=outputs.hidden_states,
1492
+ attentions=outputs.attentions,
1493
+ )
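# Illustrative sketch (standard next-token shift, matching the loss above): logits at
# position t are scored against the label at position t+1, and -100 labels are ignored
# by CrossEntropyLoss by default.
import torch
from torch.nn import CrossEntropyLoss

def causal_lm_loss(logits, labels):
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous().to(shift_logits.device)
    return CrossEntropyLoss()(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))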
1494
+
1495
+ def prepare_inputs_for_generation(
1496
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1497
+ ):
1498
+ # Omit tokens covered by past_key_values
1499
+ if past_key_values is not None:
1500
+ if isinstance(past_key_values, Cache):
1501
+ cache_length = past_key_values.get_seq_length()
1502
+ # seen_tokens property has been removed in transformers 4.54.0
1503
+ past_length = getattr(past_key_values, 'seen_tokens', cache_length)
1504
+ # get_max_length() has been replaced by get_max_cache_shape() in transformers 4.49.0
1505
+ # in transformers 4.54.0, DynamicCache returns -1 instead of None to indicate no limit
1506
+ if hasattr(past_key_values, 'get_max_cache_shape'):
1507
+ max_cache_length = past_key_values.get_max_cache_shape()
1508
+ # Convert -1 to None for consistency with old behavior
1509
+ max_cache_length = None if max_cache_length == -1 else max_cache_length
1510
+ else:
1511
+ max_cache_length = past_key_values.get_max_length()
1512
+ else:
1513
+ cache_length = past_length = past_key_values[0][0].shape[2]
1514
+ max_cache_length = None
1515
+
1516
+ # Keep only the unprocessed tokens:
1517
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1518
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1519
+ # input)
1520
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1521
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1522
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1523
+ # input_ids based on the past_length.
1524
+ elif past_length < input_ids.shape[1]:
1525
+ input_ids = input_ids[:, past_length:]
1526
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1527
+
1528
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1529
+ if (
1530
+ max_cache_length is not None
1531
+ and attention_mask is not None
1532
+ and cache_length + input_ids.shape[1] > max_cache_length
1533
+ ):
1534
+ attention_mask = attention_mask[:, -max_cache_length:]
1535
+
1536
+ position_ids = kwargs.get("position_ids", None)
1537
+ if attention_mask is not None and position_ids is None:
1538
+ # create position_ids on the fly for batch generation
1539
+ position_ids = attention_mask.long().cumsum(-1) - 1
1540
+ position_ids.masked_fill_(attention_mask == 0, 1)
1541
+ if past_key_values:
1542
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1543
+
1544
+
1545
+ def is_cache_empty(past_key_values):
1546
+ if past_key_values is None or len(past_key_values) == 0:
1547
+ return True
1548
+ if hasattr(past_key_values, 'is_initialized'):
1549
+ return not past_key_values.is_initialized
1550
+ if isinstance(past_key_values, Cache):
1551
+ for idx, layer in enumerate(past_key_values.layers):
1552
+ if past_key_values.get_seq_length(idx) > 0:
1553
+ return False
1554
+ return True
1555
+ return False
1556
+
1557
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1558
+ # in newer transformers versions, past_key_values can be an empty cache in the 1st generation step.
1559
+ if inputs_embeds is not None and is_cache_empty(past_key_values):
1560
+ model_inputs = {"inputs_embeds": inputs_embeds}
1561
+ else:
1562
+ model_inputs = {"input_ids": input_ids}
1563
+
1564
+ model_inputs.update(
1565
+ {
1566
+ "position_ids": position_ids,
1567
+ "past_key_values": past_key_values,
1568
+ "use_cache": kwargs.get("use_cache"),
1569
+ "attention_mask": attention_mask,
1570
+ }
1571
+ )
1572
+ return model_inputs
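# Illustrative sketch (assumes only the cache methods probed for above): a helper form of
# the version-compatibility logic in prepare_inputs_for_generation for reading the cache
# length limit across transformers releases.
def max_cache_length_of(cache):
    if hasattr(cache, "get_max_cache_shape"):  # newer transformers (>= 4.49)
        limit = cache.get_max_cache_shape()
        return None if limit == -1 else limit  # newer versions use -1 to mean "no limit"
    if hasattr(cache, "get_max_length"):       # older transformers
        return cache.get_max_length()
    return None                                # legacy tuple caches carry no limit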
1573
+
1574
+ @staticmethod
1575
+ def _reorder_cache(past_key_values, beam_idx):
1576
+ reordered_past = ()
1577
+ for layer_past in past_key_values:
1578
+ reordered_past += (
1579
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1580
+ )
1581
+ return reordered_past
1582
+
1583
+
modeling_videochat_flash.py ADDED
@@ -0,0 +1,722 @@
1
+ # Copyright 2024
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import ABC, abstractmethod
16
+ import re
17
+ import torch
18
+ import torch.nn as nn
19
+ import random
20
+ from typing import List, Optional, Tuple, Union, Dict
21
+
22
+ from transformers import AutoConfig, AutoModelForCausalLM
23
+ from transformers.modeling_outputs import CausalLMOutputWithPast
24
+ from transformers.generation.utils import GenerateOutput
25
+ from transformers import Qwen2Config
26
+
27
+ from .vision_tower_builder import build_vision_tower
28
+ from .mm_projector_builder import build_vision_projector
29
+
30
+ from .constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_IMAGE_TOKEN
31
+ from .conversation import conv_templates, SeparatorStyle
32
+ from .mm_utils import tokenizer_image_token, KeywordsStoppingCriteria, get_anyres_image_grid_shape, load_video
33
+ from .modeling_qwen2_flash import Qwen2Model_Flash, Qwen2ForCausalLM_Flash
34
+
35
+
36
+ class LlavaMetaModel:
37
+
38
+ def __init__(self, config):
39
+ super(LlavaMetaModel, self).__init__(config)
40
+
41
+ if hasattr(config, "mm_vision_tower"):
42
+ delay_load = getattr(config, "delay_load", False)
43
+ self.vision_tower = build_vision_tower(config, delay_load=delay_load)
44
+ self.mm_projector = build_vision_projector(config, vision_cfg=self.vision_tower.config)
45
+
46
+ if "unpad" in getattr(config, "mm_patch_merge_type", ""):
47
+ self.image_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))
48
+ if "nopad" in getattr(config, "mm_patch_merge_type", "") and getattr(self.config, "mm_newline_position", "nothing") != "nothing":
49
+ self.frame_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))
50
+
51
+ def get_vision_tower(self):
52
+ vision_tower = getattr(self, "vision_tower", None)
53
+ if type(vision_tower) is list:
54
+ vision_tower = vision_tower[0]
55
+ return vision_tower
56
+
57
+ def initialize_vision_modules(self, model_args, fsdp=None):
58
+ vision_tower = model_args.vision_tower
59
+ mm_vision_select_layer = model_args.mm_vision_select_layer
60
+ mm_vision_select_feature = model_args.mm_vision_select_feature
61
+ pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
62
+ mm_patch_merge_type = model_args.mm_patch_merge_type
63
+
64
+ self.config.mm_vision_tower = vision_tower
65
+ self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")
66
+
67
+ if self.get_vision_tower() is None:
68
+ vision_tower = build_vision_tower(model_args)
69
+
70
+ if fsdp is not None and len(fsdp) > 0:
71
+ self.vision_tower = [vision_tower]
72
+ else:
73
+ self.vision_tower = vision_tower
74
+ else:
75
+ if fsdp is not None and len(fsdp) > 0:
76
+ vision_tower = self.vision_tower[0]
77
+ else:
78
+ vision_tower = self.vision_tower
79
+ vision_tower.load_model()
80
+
81
+
82
+
83
+ self.config.use_mm_proj = True
84
+ self.config.mm_projector_type = getattr(model_args, "mm_projector_type", "linear")
85
+ self.config.mm_vision_select_layer = mm_vision_select_layer
86
+ self.config.mm_vision_select_feature = mm_vision_select_feature
87
+ self.config.mm_patch_merge_type = mm_patch_merge_type
88
+
89
+ if getattr(self, "mm_projector", None) is None:
90
+ self.mm_projector = build_vision_projector(self.config, vision_cfg=vision_tower.config)
91
+
92
+ if "unpad" in mm_patch_merge_type:
93
+ embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
94
+ self.image_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
95
+ if "nopad" in getattr(self.config, "mm_patch_merge_type", "") and getattr(self.config, "mm_newline_position", "nothing") != "nothing":
96
+ embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
97
+ self.frame_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
98
+ else:
99
+ # In case it is frozen by LoRA
100
+ for p in self.mm_projector.parameters():
101
+ p.requires_grad = True
102
+
103
+ if pretrain_mm_mlp_adapter is not None:
104
+ mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location="cpu")
105
+
106
+ def get_w(weights, keyword):
107
+ return {k.split(keyword + ".")[1]: v for k, v in weights.items() if keyword in k}
108
+
109
+ if self.config.mm_projector_type =='lxh_qformer':
110
+ incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"), strict=False)
111
+ else:
112
+ incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"))
113
+ print(f"Loaded mm projector weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")
114
+
115
+
116
+ class LlavaMetaForCausalLM(ABC):
117
+
118
+ @abstractmethod
119
+ def get_model(self):
120
+ pass
121
+
122
+ def get_vision_tower(self):
123
+ return self.get_model().get_vision_tower()
124
+
125
+
126
+ def encode_video_image(self, images_list, video_idx_in_batch):
127
+ # after the video encoder, features are processed by the image connector (projector)
128
+ bs = len(images_list)
129
+
130
+ concat_images = []
131
+ concat_videos = []
132
+ for idx, image in enumerate(images_list):
133
+ if idx in video_idx_in_batch:
134
+ concat_videos.append(image)
135
+ else:
136
+ concat_images.append(image)
137
+ # print(concat_videos[0].shape)
138
+ has_image = len(concat_images) > 0
139
+ has_video = len(concat_videos) > 0
140
+
141
+ mm_local_num_frames = getattr(self.config, "mm_local_num_frames", -1)
142
+ assert mm_local_num_frames != -1
143
+ if has_image:
144
+ image_split_sizes = [image.shape[0] for image in concat_images]
145
+ concat_images = torch.cat([image.unsqueeze(1) for image in concat_images], dim=0)
146
+ # print("input vit image.shape:", concat_images.shape)
147
+ images_features = self.get_model().get_vision_tower()(concat_images) # B_i, N, D
148
+ images_features = torch.split(images_features, image_split_sizes)
149
+
150
+ if has_video:
151
+ video_split_sizes = [video.shape[0] // mm_local_num_frames for video in concat_videos]
152
+ concat_videos = torch.cat([video.reshape(video.shape[0] // mm_local_num_frames, mm_local_num_frames, video.shape[1], video.shape[2], video.shape[3]) for video in concat_videos], dim=0)
153
+ # print("input vit video.shape:", concat_videos.shape)
154
+ videos_features = self.get_model().get_vision_tower()(concat_videos) # B_v, N, D
155
+ videos_features = [v.reshape(-1, v.shape[-2] // mm_local_num_frames, v.shape[-1]) for v in torch.split(videos_features, video_split_sizes)]
156
+
157
+
158
+ all_videos_or_images_features = []
159
+ img_idx = 0
160
+ vid_idx = 0
161
+
162
+ for idx in range(bs):
163
+
164
+ if idx in video_idx_in_batch:
165
+ feat = self.get_model().mm_projector(videos_features[vid_idx], compress=True, local_num_frames=getattr(self.config, "mm_local_num_frames", -1))
166
+
167
+ vid_idx += 1
168
+ else:
169
+ feat = self.get_model().mm_projector(images_features[img_idx], compress=False)
170
+ img_idx += 1
171
+ # print("video_idx_in_batch:", video_idx_in_batch)
172
+ all_videos_or_images_features.append(feat)
173
+
174
+ if has_video:
175
+ assert vid_idx == len(videos_features), f"vid: {vid_idx} != {len(videos_features)}"
176
+ if has_image:
177
+ assert img_idx == len(images_features), f"img: {img_idx} != {len(images_features)}"
178
+
179
+ return all_videos_or_images_features
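# Illustrative sketch (sizes are arbitrary examples): the shape bookkeeping behind the
# video branch above — a clip of T frames is regrouped into chunks of mm_local_num_frames
# frames before being passed to the vision tower.
import torch

def chunk_video(video, local_num_frames):
    # video: (T, C, H, W) with T divisible by local_num_frames
    t, c, h, w = video.shape
    return video.reshape(t // local_num_frames, local_num_frames, c, h, w)

# chunk_video(torch.zeros(16, 3, 224, 224), 4).shape -> torch.Size([4, 4, 3, 224, 224])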
180
+
181
+
182
+
183
+ def prepare_inputs_labels_for_multimodal(self, input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities=["image"], image_sizes=None):
184
+ assert type(modalities) is list, modalities
185
+
186
+ vision_tower = self.get_vision_tower()
187
+ # rank_print(modalities)
188
+ if vision_tower is None or images is None or input_ids.shape[1] == 1:
189
+ return input_ids, position_ids, attention_mask, past_key_values, None, labels
190
+
191
+ if type(images) is list or images.ndim == 5:
192
+ if type(images) is list:
193
+ images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]
194
+
195
+ video_idx_in_batch = []
196
+ for _ in range(len(modalities)):
197
+ if modalities[_] == "video":
198
+ video_idx_in_batch.append(_)
199
+
200
+ images_list = []
201
+ for image in images:
202
+ if image.ndim == 4:
203
+ images_list.append(image)
204
+ else:
205
+ images_list.append(image.unsqueeze(0))
206
+
207
+
208
+ vision_encode_type = getattr(self.config, "vision_encode_type", "image")
209
+ mm_patch_merge_type = getattr(self.config, "mm_patch_merge_type", "flat")
210
+ image_aspect_ratio = getattr(self.config, "image_aspect_ratio", "square")
211
+ frame_aspect_ratio = getattr(self.config, "frame_aspect_ratio", "square")
212
+ mm_newline_position = getattr(self.config, "mm_newline_position", "nothing")
213
+
214
+
215
+ if vision_encode_type == "video_image": # video backbone, process video with compress
216
+ image_features = self.encode_video_image(images_list, video_idx_in_batch=video_idx_in_batch)
217
+ else:
218
+ raise NotImplementedError(vision_encode_type)
219
+
220
+
221
+ if mm_patch_merge_type == "flat":
222
+ image_features = [x.flatten(0, 1) for x in image_features]
223
+ elif mm_patch_merge_type.startswith("spatial"):
224
+ new_image_features = []
225
+ for image_idx, image_feature in enumerate(image_features):
226
+
227
+ if image_idx in video_idx_in_batch: # video operations
228
+
229
+ if "anyres" in frame_aspect_ratio:
230
+ raise NotImplementedError
231
+ else:
232
+ frame_feature = image_feature
233
+
234
+ if "pad" in mm_patch_merge_type:
235
+ if mm_newline_position == 'one_token':
236
+ frame_feature = frame_feature.flatten(0, 1)
237
+ if "unpad" in mm_patch_merge_type:
238
+ frame_feature = torch.cat((frame_feature, self.model.image_newline[None].to(frame_feature.device)), dim=0)
239
+ else:
240
+ frame_feature = torch.cat((frame_feature, self.model.frame_newline[None].to(frame_feature.device)), dim=0)
241
+ elif mm_newline_position == 'nothing':
242
+ frame_feature = frame_feature.flatten(0, 1)
243
+ else:
244
+ raise NotImplementedError("add pad please!!")
245
+ else:
246
+ frame_feature = frame_feature.flatten(0, 1)
247
+
248
+ # print(f"final video frame_feature.shape: {frame_feature.shape}")
249
+ image_feature = frame_feature
250
+
251
+ elif image_feature.shape[0] > 1: # multi patches and multi images operations
252
+ base_image_feature = image_feature[0]
253
+ image_feature = image_feature[1:]
254
+ origin_size = image_feature.shape
255
+
256
+ height = width = self.get_model().mm_projector.num_image_patches_per_side
257
+ assert height * width == base_image_feature.shape[0], f"height:{height}, width: {width}, base_image_feature: {base_image_feature.shape}"
258
+
259
+ if "anyres_max" in image_aspect_ratio:
260
+ matched_anyres_max_num_patches = re.match(r"anyres_max_(\d+)", image_aspect_ratio)
261
+ if matched_anyres_max_num_patches:
262
+ max_num_patches = int(matched_anyres_max_num_patches.group(1))
263
+
264
+ if "anyres" in image_aspect_ratio:
265
+ if hasattr(self.get_vision_tower(), "image_size"):
266
+ vision_tower_image_size = self.get_vision_tower().image_size
267
+ else:
268
+ raise ValueError("vision_tower_image_size is not found in the vision tower.")
269
+ try:
270
+ num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, vision_tower_image_size, max_resolutions=None)
271
+ except Exception as e:
272
+ print(f"Error: {e}")
273
+ raise e
274
+ # num_patch_width, num_patch_height = 2, 2
275
+
276
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
277
+ else:
278
+ raise NotImplementedError(image_aspect_ratio)
279
+ image_feature = image_feature.view(2, 2, height, width, -1)
280
+
281
+ if "maxpool2x2" in mm_patch_merge_type:
282
+ raise NotImplementedError
283
+ elif "unpad" in mm_patch_merge_type and "anyres_max" in image_aspect_ratio and matched_anyres_max_num_patches:
284
+ raise NotImplementedError
285
+ elif "unpad" in mm_patch_merge_type:
286
+ raise NotImplementedError
287
+ else:
288
+ image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
289
+ image_feature = image_feature.flatten(0, 3)
290
+ if "nobase" in mm_patch_merge_type:
291
+ pass
292
+ else:
293
+ try:
294
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
295
+ except Exception as e:
296
+ raise ValueError(f"{num_patch_width} {num_patch_height} now: base_image_feature: {base_image_feature.shape}, {image_feature.shape}, image_sizes[image_idx]: {image_sizes[image_idx]}, origin_size: {origin_size}, {image_sizes[image_idx]}, {self.config.image_grid_pinpoints}, {vision_tower_image_size}")
297
+ else: # single image operations
298
+ image_feature = image_feature[0]
299
+ if "unpad" in mm_patch_merge_type:
300
+ image_feature = torch.cat((image_feature, self.model.image_newline[None]), dim=0)
301
+
302
+ # print(f"image/video_feature.shape: {image_feature.shape}")
303
+ new_image_features.append(image_feature)
304
+ image_features = new_image_features
305
+ else:
306
+ raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
307
+ else:
308
+ # raise NotImplementedError(f"images.shape={images.shape}, modalities={modalities}")
309
+ image_features = self.encode_image(images)
310
+
311
+ # TODO: image start / end is not implemented here to support pretraining.
312
+ if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(self.config, "mm_use_im_start_end", False):
313
+ raise NotImplementedError
314
+ # print(f"Total images len(image_features: {len(image_features)}")
315
+
316
+ # Let's just add dummy tensors if they do not exist,
317
+ # it is a headache to deal with None all the time.
318
+ # But it is not ideal, and if you have a better idea,
319
+ # please open an issue / submit a PR, thanks.
320
+ _labels = labels
321
+ _position_ids = position_ids
322
+ _attention_mask = attention_mask
323
+ if attention_mask is None:
324
+ attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
325
+ else:
326
+ attention_mask = attention_mask.bool()
327
+ if position_ids is None:
328
+ position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
329
+ if labels is None:
330
+ labels = torch.full_like(input_ids, IGNORE_INDEX)
331
+
332
+
333
+ input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
334
+ labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
335
+
336
+ new_input_embeds = []
337
+ new_labels = []
338
+ cur_image_idx = 0
339
+
340
+ mm_llm_compress = getattr(self.config, "mm_llm_compress", False)
341
+
342
+ if mm_llm_compress:
343
+ self.model.llm_compress_type = getattr(self.config, "llm_compress_type", "attention")
344
+ self.model.llm_compress_layer_list = getattr(self.config, "llm_compress_layer_list", [8, 16, 24])
345
+ self.model.llm_image_token_ratio_list = getattr(self.config, "llm_image_token_ratio_list", [1.0, 0.5, 0.25, 0.125])
346
+ first_image_token_position = []
347
+ text_prompt_lens = []
348
+ else:
349
+ self.model.llm_compress_type = "attention"
350
+ self.model.llm_compress_layer_list = []
351
+ self.model.llm_image_token_ratio_list = []
352
+ first_image_token_position = []
353
+ text_prompt_lens = []
354
+
355
+ # rank_print("Inserting Images embedding")
356
+ for batch_idx, cur_input_ids in enumerate(input_ids):
357
+ num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
358
+
359
+ if mm_llm_compress:
360
+ ####### copy from pdrop, only support single image/video NOTE ##################
361
+ # record image position for further dropping
362
+ image_index = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist()
363
+ assert len(image_index) == 1, f"Only a single image/video is supported: {image_index}"
364
+ if image_index == []:
365
+ first_image_token_position.append(-1)
366
+ else:
367
+ first_image_token_position.append(image_index[0])
368
+
369
+
370
+ # record input instruction length in inference mode
371
+ if not self.training:
372
+ if image_index == []:
373
+ assert num_images == 0, num_images
374
+ else:
375
+ assert num_images == 1, f"num_images={num_images}"
376
+ text_prompt_lens.append(cur_input_ids.shape[0] - num_images) # account for the image placeholder token
377
+
378
+ ###############################################
379
+
380
+ # print(f"num_images={num_images}")
381
+ if num_images == 0:
382
+ cur_image_features = image_features[cur_image_idx]
383
+ cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
384
+ cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
385
+ new_input_embeds.append(cur_input_embeds)
386
+ new_labels.append(labels[batch_idx])
387
+ cur_image_idx += 1
388
+ continue
389
+
390
+ image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
391
+ cur_input_ids_noim = []
392
+ cur_labels = labels[batch_idx]
393
+ cur_labels_noim = []
394
+ for i in range(len(image_token_indices) - 1):
395
+ cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1 : image_token_indices[i + 1]])
396
+ cur_labels_noim.append(cur_labels[image_token_indices[i] + 1 : image_token_indices[i + 1]])
397
+ split_sizes = [x.shape[0] for x in cur_labels_noim]
398
+ cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
399
+ cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
400
+ cur_new_input_embeds = []
401
+ cur_new_labels = []
402
+
403
+ for i in range(num_images + 1):
404
+ cur_new_input_embeds.append(cur_input_embeds_no_im[i])
405
+ cur_new_labels.append(cur_labels_noim[i])
406
+ if i < num_images:
407
+ try:
408
+ cur_image_features = image_features[cur_image_idx]
409
+ except IndexError:
410
+ print(f"cur_image_idx={cur_image_idx} is not ok")
411
+ cur_image_features = image_features[cur_image_idx - 1]
412
+ cur_image_idx += 1
413
+ cur_new_input_embeds.append(cur_image_features)
414
+ cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
415
+
416
+ cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]
417
+
418
+ # import pdb; pdb.set_trace()
419
+ cur_new_input_embeds = torch.cat(cur_new_input_embeds)
420
+ cur_new_labels = torch.cat(cur_new_labels)
421
+
422
+ new_input_embeds.append(cur_new_input_embeds)
423
+ new_labels.append(cur_new_labels)
424
+
425
+
426
+ if mm_llm_compress:
427
+ self.model.first_image_token_position = first_image_token_position
428
+ self.model.text_prompt_lens = text_prompt_lens
429
+ self.model.num_image_token_lens = [image_feature.shape[0] for image_feature in image_features]
430
+
431
+ # Truncate sequences to max length as image embeddings can make the sequence longer
432
+ tokenizer_model_max_length = getattr(self.config, "tokenizer_model_max_length", None)
433
+ # rank_print("Finishing Inserting")
434
+
435
+ new_input_embeds = [x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
436
+ new_labels = [x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]
437
+
438
+ # Combine them
439
+ max_len = max(x.shape[0] for x in new_input_embeds)
440
+ batch_size = len(new_input_embeds)
441
+
442
+ new_input_embeds_padded = []
443
+ new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
444
+ attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
445
+ position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
446
+ # print("Prepare pos id")
447
+
448
+ for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
449
+ cur_len = cur_new_embed.shape[0]
450
+ if getattr(self.config, "tokenizer_padding_side", "right") == "left":
451
+ new_input_embeds_padded.append(torch.cat((torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed), dim=0))
452
+ if cur_len > 0:
453
+ new_labels_padded[i, -cur_len:] = cur_new_labels
454
+ attention_mask[i, -cur_len:] = True
455
+ position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
456
+ else:
457
+ new_input_embeds_padded.append(torch.cat((cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0))
458
+ if cur_len > 0:
459
+ new_labels_padded[i, :cur_len] = cur_new_labels
460
+ attention_mask[i, :cur_len] = True
461
+ position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
462
+
463
+ new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
464
+ # print("tokenizer padding")
465
+
466
+ if _labels is None:
467
+ new_labels = None
468
+ else:
469
+ new_labels = new_labels_padded
470
+
471
+ if _attention_mask is None:
472
+ attention_mask = None
473
+ else:
474
+ attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
475
+
476
+ if _position_ids is None:
477
+ position_ids = None
478
+ if getattr(self.config, "use_pos_skipping", False) and self.training:
479
+ position_ids = torch.arange(new_input_embeds.size(1), device=new_input_embeds.device).unsqueeze(0).to(new_input_embeds.device)
480
+ split_position = random.randint(0, new_input_embeds.size(1))
481
+ left_add = random.randint(0, self.config.pos_skipping_range)
482
+ right_add = random.randint(left_add, self.config.pos_skipping_range)
483
+ position_ids[:, :split_position] += left_add
484
+ position_ids[:, split_position:] += right_add
485
+ # import pdb; pdb.set_trace()
486
+ # print("Finish preparing")
487
+ return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
488
+
489
+ def initialize_vision_tokenizer(self, model_args, tokenizer):
490
+ if model_args.mm_use_im_patch_token:
491
+ tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
492
+ self.resize_token_embeddings(len(tokenizer))
493
+
494
+ if model_args.mm_use_im_start_end:
495
+ num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
496
+ self.resize_token_embeddings(len(tokenizer))
497
+
498
+ if num_new_tokens > 0:
499
+ input_embeddings = self.get_input_embeddings().weight.data
500
+ output_embeddings = self.get_output_embeddings().weight.data
501
+
502
+ input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
503
+ output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
504
+
505
+ input_embeddings[-num_new_tokens:] = input_embeddings_avg
506
+ output_embeddings[-num_new_tokens:] = output_embeddings_avg
507
+
508
+ if model_args.tune_mm_mlp_adapter:
509
+ for p in self.get_input_embeddings().parameters():
510
+ p.requires_grad = True
511
+ for p in self.get_output_embeddings().parameters():
512
+ p.requires_grad = False
513
+
514
+ if model_args.pretrain_mm_mlp_adapter:
515
+ mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location="cpu")
516
+ embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
517
+ assert num_new_tokens == 2
518
+ if input_embeddings.shape == embed_tokens_weight.shape:
519
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
520
+ elif embed_tokens_weight.shape[0] == num_new_tokens:
521
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight
522
+ else:
523
+ raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
524
+ elif model_args.mm_use_im_patch_token:
525
+ if model_args.tune_mm_mlp_adapter:
526
+ for p in self.get_input_embeddings().parameters():
527
+ p.requires_grad = False
528
+ for p in self.get_output_embeddings().parameters():
529
+ p.requires_grad = False
530
+
531
+
532
+
533
+ class VideoChatFlashQwenConfig(Qwen2Config):
534
+ model_type = "videochat_flash_qwen"
535
+
536
+
537
+ class VideoChatFlashQwenModel(LlavaMetaModel, Qwen2Model_Flash):
538
+ config_class = VideoChatFlashQwenConfig
539
+
540
+ def __init__(self, config: VideoChatFlashQwenConfig):
541
+ super(VideoChatFlashQwenModel, self).__init__(config)
542
+
543
+
544
+ class VideoChatFlashQwenForCausalLM(LlavaMetaForCausalLM, Qwen2ForCausalLM_Flash):
545
+ config_class = VideoChatFlashQwenConfig
546
+
547
+ def __init__(self, config):
548
+ # super(Qwen2ForCausalLM, self).__init__(config)
549
+ Qwen2ForCausalLM_Flash.__init__(self, config)
550
+ config.model_type = "videochat_flash_qwen"
551
+ # config.rope_scaling = None
552
+
553
+ self.model = VideoChatFlashQwenModel(config)
554
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
555
+ # Initialize weights and apply final processing
556
+ self.post_init()
557
+
558
+ def get_model(self):
559
+ return self.model
560
+
561
+ def forward(
562
+ self,
563
+ input_ids: torch.LongTensor = None,
564
+ attention_mask: Optional[torch.Tensor] = None,
565
+ position_ids: Optional[torch.LongTensor] = None,
566
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
567
+ inputs_embeds: Optional[torch.FloatTensor] = None,
568
+ labels: Optional[torch.LongTensor] = None,
569
+ use_cache: Optional[bool] = None,
570
+ output_attentions: Optional[bool] = None,
571
+ output_hidden_states: Optional[bool] = None,
572
+ images: Optional[torch.FloatTensor] = None,
573
+ image_sizes: Optional[List[List[int]]] = None,
574
+ return_dict: Optional[bool] = None,
575
+ modalities: Optional[List[str]] = ["image"],
576
+ dpo_forward: Optional[bool] = False,
577
+ cache_position=None,
578
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
579
+
580
+ if inputs_embeds is None:
581
+ (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes)
582
+
583
+ # print("inputs_embeds.shape:", inputs_embeds.shape)
584
+ if dpo_forward:
585
+ raise NotImplementedError
586
+ else:
587
+ return super().forward(
588
+ input_ids=input_ids,
589
+ attention_mask=attention_mask,
590
+ position_ids=position_ids,
591
+ past_key_values=past_key_values,
592
+ inputs_embeds=inputs_embeds,
593
+ labels=labels,
594
+ use_cache=use_cache,
595
+ output_attentions=output_attentions,
596
+ output_hidden_states=output_hidden_states,
597
+ return_dict=return_dict,
598
+ )
599
+
600
+ @torch.no_grad()
601
+ def generate(
602
+ self,
603
+ inputs: Optional[torch.Tensor] = None,
604
+ images: Optional[torch.Tensor] = None,
605
+ image_sizes: Optional[torch.Tensor] = None,
606
+ modalities: Optional[List[str]] = ["image"],
607
+ **kwargs,
608
+ ) -> Union[GenerateOutput, torch.LongTensor]:
609
+ position_ids = kwargs.pop("position_ids", None)
610
+ attention_mask = kwargs.pop("attention_mask", None)
611
+ if "inputs_embeds" in kwargs:
612
+ raise NotImplementedError("`inputs_embeds` is not supported")
613
+
614
+ if images is not None:
615
+ (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes)
616
+ else:
617
+ self.model.image_token_posi = [-1]
618
+ self.model.prompt_len = None
619
+ self.model.image_tokens = [0]
620
+ inputs_embeds = self.get_model().embed_tokens(inputs)
621
+
622
+ return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
623
+
624
+ @torch.no_grad()
625
+ def chat(self,
626
+ video_path,
627
+ tokenizer,
628
+ user_prompt,
629
+ chat_history=None,
630
+ return_history=True,
631
+ max_num_frames=512,
632
+ media_dict=None,
633
+ generation_config={}):
634
+
635
+ frames, time_msg = load_video(video_path, max_num_frames=max_num_frames, media_dict=media_dict)
636
+
637
+ image_sizes = [frames[0].shape[:2]]
638
+
639
+ if torch.cuda.is_available():
640
+ frames = [self.get_vision_tower().image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].to(self.model.dtype).cuda()]
641
+ else:
642
+ frames = [self.get_vision_tower().image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].to(self.model.dtype)]
643
+
644
+ conv = conv_templates["qwen_2"].copy()
645
+
646
+ if chat_history is None or len(chat_history) == 0:
647
+ user_prompt = f'{DEFAULT_IMAGE_TOKEN}\n{time_msg.strip()} {user_prompt}'
648
+ else:
649
+ assert DEFAULT_IMAGE_TOKEN in chat_history[0]['content'], chat_history
650
+ for msg in chat_history:
651
+ conv.append_message(msg['role'], msg['content'])
652
+
653
+ conv.append_message(conv.roles[0], user_prompt)
654
+ conv.append_message(conv.roles[1], None)
655
+
656
+ prompt = conv.get_prompt()
657
+
658
+ if torch.cuda.is_available():
659
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
660
+ else:
661
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0)
662
+
663
+ if tokenizer.pad_token_id is None:
664
+ if "qwen" in tokenizer.name_or_path.lower():
665
+ print("Setting pad token to bos token for qwen model.")
666
+ tokenizer.pad_token_id = 151643
667
+
668
+ if torch.cuda.is_available():
669
+ attention_masks = input_ids.ne(tokenizer.pad_token_id).long().cuda()
670
+ else:
671
+ attention_masks = input_ids.ne(tokenizer.pad_token_id).long()
672
+
673
+ stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
674
+ keywords = [stop_str]
675
+ stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
676
+
677
+ with torch.inference_mode():
678
+ output_ids = self.generate(
679
+ inputs=input_ids,
680
+ images=frames,
681
+ attention_mask=attention_masks,
682
+ modalities=["video"],
683
+ image_sizes=image_sizes,
684
+ use_cache=True,
685
+ stopping_criteria=[stopping_criteria],
686
+ **generation_config
687
+ )
688
+
689
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
690
+ if outputs.endswith(stop_str):
691
+ outputs = outputs[: -len(stop_str)]
692
+
693
+ outputs = outputs.strip()
694
+
695
+ # print(f"\033[91m== Question: \033[0m\n{prompt}\n")
696
+ # print(f"\033[91m== Response: \033[0m\n{outputs}\n")
697
+
698
+ if chat_history is None:
699
+ chat_history = []
700
+
701
+ chat_history.append({"role":conv.roles[0], "content":user_prompt})
702
+ chat_history.append({"role":conv.roles[1], "content":outputs})
703
+ if return_history:
704
+ return outputs, chat_history
705
+ else:
706
+ return outputs
707
+
708
+
709
+
710
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
711
+ images = kwargs.pop("images", None)
712
+ image_sizes = kwargs.pop("image_sizes", None)
713
+ inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
714
+ if images is not None:
715
+ inputs["images"] = images
716
+ if image_sizes is not None:
717
+ inputs["image_sizes"] = image_sizes
718
+ return inputs
719
+
720
+
721
+ AutoConfig.register("videochat_flash_qwen", VideoChatFlashQwenConfig)
722
+ AutoModelForCausalLM.register(VideoChatFlashQwenConfig, VideoChatFlashQwenForCausalLM)
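The two registration calls above make the custom `videochat_flash_qwen` architecture loadable through the Auto classes, and `chat` wraps frame loading, prompt construction, and generation into one call. A minimal usage sketch follows, assuming the repository is set up for `trust_remote_code` loading; the checkpoint path, video file, frame budget, and generation settings are illustrative assumptions rather than values taken from this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path to this repository; substitute the real model id or folder.
model_path = "./VideoChat-Flash-Qwen2-checkpoint"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,  # picks up the registered videochat_flash_qwen classes
).eval()
if torch.cuda.is_available():
    model = model.cuda()

# First turn: `chat` samples frames, prepends the image token and a timestamp message,
# runs `generate`, and returns the reply together with the running history.
reply, history = model.chat(
    "example.mp4",  # hypothetical video file
    tokenizer,
    "Describe the video in one sentence.",
    chat_history=None,
    return_history=True,
    max_num_frames=128,
    generation_config={"do_sample": False, "max_new_tokens": 256},
)

# Follow-up turns reuse the returned history.
reply, history = model.chat(
    "example.mp4", tokenizer, "What happens at the end?",
    chat_history=history, return_history=True, max_num_frames=128,
)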
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
3
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|im_end|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 32768,
203
+ "pad_token": "<|endoftext|>",
204
+ "padding_side": "right",
205
+ "split_special_tokens": false,
206
+ "tokenizer_class": "Qwen2Tokenizer",
207
+ "unk_token": null
208
+ }
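The tokenizer_config above pins the stock Qwen2 tokenizer with `<|im_end|>` as EOS, `<|endoftext|>` as padding, and a 32768-token context window. A short sanity-check sketch, reusing the same hypothetical checkpoint path as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./VideoChat-Flash-Qwen2-checkpoint")  # hypothetical path

# Values declared in tokenizer_config.json / special_tokens_map.json.
assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
assert tok.model_max_length == 32768
# <|endoftext|> sits at id 151643, matching added_tokens_decoder and the fallback
# pad_token_id hard-coded in the chat method above.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643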
vision_tower_builder.py ADDED
@@ -0,0 +1,868 @@
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torch.utils.checkpoint as checkpoint
6
+ from functools import partial
7
+ from einops import rearrange
8
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
9
+ from typing import Optional, Tuple, Union, Dict
10
+ from functools import partial, reduce
11
+ from PIL import Image
12
+ from torch import nn
13
+ from transformers.image_processing_utils import BatchFeature, get_size_dict
14
+ from transformers.image_transforms import (
15
+ convert_to_rgb,
16
+ normalize,
17
+ rescale,
18
+ resize,
19
+ to_channel_dimension_format,
20
+ )
21
+ from transformers.image_utils import (
22
+ ChannelDimension,
23
+ PILImageResampling,
24
+ to_numpy_array,
25
+ )
26
+
27
+ try:
28
+ from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
29
+ from flash_attn.bert_padding import unpad_input, pad_input
30
+ except:
31
+ pass
32
+
33
+ class FlashAttention(nn.Module):
34
+ """Implement the scaled dot product attention with softmax.
35
+ Arguments
36
+ ---------
37
+ softmax_scale: The temperature to use for the softmax attention.
38
+ (default: 1/sqrt(d_keys) where d_keys is computed at
39
+ runtime)
40
+ attention_dropout: The dropout rate to apply to the attention
41
+ (default: 0.0)
42
+ """
43
+
44
+ def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
45
+ super().__init__()
46
+ self.softmax_scale = softmax_scale
47
+ self.dropout_p = attention_dropout
48
+
49
+ def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
50
+ max_s=None, need_weights=False):
51
+ """Implements the multihead softmax attention.
52
+ Arguments
53
+ ---------
54
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
55
+ if unpadded: (nnz, 3, h, d)
56
+ key_padding_mask: a bool tensor of shape (B, S)
57
+ """
58
+ assert not need_weights
59
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
60
+ assert qkv.is_cuda
61
+
62
+ if cu_seqlens is None:
63
+ batch_size = qkv.shape[0]
64
+ seqlen = qkv.shape[1]
65
+ if key_padding_mask is None:
66
+ qkv = rearrange(qkv, 'b s ... -> (b s) ...')
67
+ max_s = seqlen
68
+ cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
69
+ device=qkv.device)
70
+ output = flash_attn_varlen_qkvpacked_func(
71
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
72
+ softmax_scale=self.softmax_scale, causal=causal
73
+ )
74
+ output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
75
+ else:
76
+ nheads = qkv.shape[-2]
77
+ x = rearrange(qkv, 'b s three h d -> b s (three h d)')
78
+ x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
79
+ x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
80
+ output_unpad = flash_attn_varlen_qkvpacked_func(
81
+ x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
82
+ softmax_scale=self.softmax_scale, causal=causal
83
+ )
84
+ output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
85
+ indices, batch_size, seqlen),
86
+ 'b s (h d) -> b s h d', h=nheads)
87
+ else:
88
+ assert max_s is not None
89
+ output = flash_attn_varlen_qkvpacked_func(
90
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
91
+ softmax_scale=self.softmax_scale, causal=causal
92
+ )
93
+
94
+ return output, None
95
+
96
+
97
+
98
+
99
+
100
+ # --------------------------------------------------------
101
+ # 2D sine-cosine position embedding
102
+ # References:
103
+ # Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
104
+ # MoCo v3: https://github.com/facebookresearch/moco-v3
105
+ # --------------------------------------------------------
106
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
107
+ """
108
+ grid_size: int of the grid height and width
109
+ return:
110
+ pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
111
+ """
112
+ grid_h = np.arange(grid_size, dtype=np.float32)
113
+ grid_w = np.arange(grid_size, dtype=np.float32)
114
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
115
+ grid = np.stack(grid, axis=0)
116
+
117
+ grid = grid.reshape([2, 1, grid_size, grid_size])
118
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
119
+ if cls_token:
120
+ pos_embed = np.concatenate(
121
+ [np.zeros([1, embed_dim]), pos_embed], axis=0
122
+ )
123
+ return pos_embed
124
+
125
+
126
+ def get_1d_sincos_pos_embed(embed_dim, t_size, cls_token=False):
127
+ """
128
+ t_size: int of the temporal size
129
+ return:
130
+ pos_embed: [t_size, embed_dim] or [1+t_size, embed_dim] (w/ or w/o cls_token)
131
+ """
132
+ grid_t = np.arange(t_size, dtype=np.float32)
133
+ pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid_t)
134
+ if cls_token:
135
+ pos_embed = np.concatenate(
136
+ [np.zeros([1, embed_dim]), pos_embed], axis=0
137
+ )
138
+ return pos_embed
139
+
140
+
141
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
142
+ assert embed_dim % 2 == 0
143
+
144
+ # use half of dimensions to encode grid_h
145
+ emb_h = get_1d_sincos_pos_embed_from_grid(
146
+ embed_dim // 2, grid[0]
147
+ ) # (H*W, D/2)
148
+ emb_w = get_1d_sincos_pos_embed_from_grid(
149
+ embed_dim // 2, grid[1]
150
+ ) # (H*W, D/2)
151
+
152
+ emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
153
+ return emb
154
+
155
+
156
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
157
+ """
158
+ embed_dim: output dimension for each position
159
+ pos: a list of positions to be encoded: size (M,)
160
+ out: (M, D)
161
+ """
162
+ assert embed_dim % 2 == 0
163
+ omega = np.arange(embed_dim // 2, dtype=np.float32)
164
+ omega /= embed_dim / 2.0
165
+ omega = 1.0 / 10000**omega # (D/2,)
166
+
167
+ pos = pos.reshape(-1) # (M,)
168
+ out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
169
+
170
+ emb_sin = np.sin(out) # (M, D/2)
171
+ emb_cos = np.cos(out) # (M, D/2)
172
+
173
+ emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
174
+ return emb
175
+
176
+ # --------------------------------------------------------
177
+ # 3D sine-cosine position embedding
178
+ # References:
179
+ # MVD: https://github.com/ruiwang2021/mvd/blob/main/modeling_finetune.py
180
+ # --------------------------------------------------------
181
+ def get_3d_sincos_pos_embed(embed_dim, grid_size, t_size, cls_token=False):
182
+ """
183
+ grid_size: int of the grid height and width
184
+ t_size: int of the temporal size
185
+ return:
186
+ pos_embed: [t_size*grid_size*grid_size, embed_dim] or [1+t_size*grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
187
+ """
188
+ assert embed_dim % 4 == 0
189
+ embed_dim_spatial = embed_dim // 4 * 3
190
+ embed_dim_temporal = embed_dim // 4
191
+
192
+ # spatial
193
+ grid_h = np.arange(grid_size, dtype=np.float32)
194
+ grid_w = np.arange(grid_size, dtype=np.float32)
195
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
196
+ grid = np.stack(grid, axis=0)
197
+
198
+ grid = grid.reshape([2, 1, grid_size, grid_size])
199
+ pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(
200
+ embed_dim_spatial, grid
201
+ )
202
+
203
+ # temporal
204
+ grid_t = np.arange(t_size, dtype=np.float32)
205
+ pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(
206
+ embed_dim_temporal, grid_t
207
+ )
208
+
209
+ # concate: [T, H, W] order
210
+ pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :]
211
+ pos_embed_temporal = np.repeat(
212
+ pos_embed_temporal, grid_size**2, axis=1
213
+ ) # [T, H*W, D // 4]
214
+ pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :]
215
+ pos_embed_spatial = np.repeat(
216
+ pos_embed_spatial, t_size, axis=0
217
+ ) # [T, H*W, D // 4 * 3]
218
+
219
+ pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1)
220
+ pos_embed = pos_embed.reshape([-1, embed_dim]) # [T*H*W, D]
221
+
222
+ if cls_token:
223
+ pos_embed = np.concatenate(
224
+ [np.zeros([1, embed_dim]), pos_embed], axis=0
225
+ )
226
+ return pos_embed
227
+
228
+
229
+
230
+ class RMSNorm(nn.Module):
231
+ def __init__(self, hidden_size, eps=1e-6):
232
+ super().__init__()
233
+ self.weight = nn.Parameter(torch.ones(hidden_size))
234
+ self.variance_epsilon = eps
235
+
236
+ def forward(self, hidden_states):
237
+ input_dtype = hidden_states.dtype
238
+ hidden_states = hidden_states.to(torch.float32)
239
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
240
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
241
+ return self.weight * hidden_states.to(input_dtype)
242
+
243
+
244
+ class LayerScale(nn.Module):
245
+ def __init__(self, dim, init_values=1e-5, inplace=False, force_fp32=False):
246
+ super().__init__()
247
+ self.inplace = inplace
248
+ self.weight = nn.Parameter(init_values * torch.ones(dim))
249
+ self.force_fp32 = force_fp32
250
+
251
+ @torch.cuda.amp.autocast(enabled=False)
252
+ def forward(self, x):
253
+ if self.force_fp32:
254
+ output_type = x.dtype
255
+ out = x.float().mul_(self.weight.float()) if self.inplace else x.float() * self.weight.float()
256
+ return out.to(dtype=output_type)
257
+ else:
258
+ out = x.mul_(self.weight) if self.inplace else x * self.weight
259
+ return out
260
+
261
+
262
+ class Attention(nn.Module):
263
+ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., use_flash_attn=False,
264
+ causal=False, norm_layer=nn.LayerNorm, qk_normalization=False, use_fused_rmsnorm=False):
265
+ super().__init__()
266
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
267
+ self.num_heads = num_heads
268
+ head_dim = dim // num_heads
269
+ self.scale = head_dim ** -0.5
270
+
271
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
272
+ self.attn_drop = nn.Dropout(attn_drop)
273
+ self.proj = nn.Linear(dim, dim)
274
+ self.proj_drop = nn.Dropout(proj_drop)
275
+
276
+ self.use_flash_attn = use_flash_attn
277
+ if use_flash_attn:
278
+ self.causal = causal
279
+ self.inner_attn = FlashAttention(attention_dropout=attn_drop)
280
+
281
+ self.qk_normalization = qk_normalization
282
+ self.q_norm = norm_layer(dim) if qk_normalization else nn.Identity()
283
+ self.k_norm = norm_layer(dim) if qk_normalization else nn.Identity()
284
+ self.use_fused_rmsnorm = use_fused_rmsnorm
285
+
286
+ def _naive_attn(self, x):
287
+ B, N, C = x.shape
288
+ # print(x.shape, torch.cuda.memory_allocated(), torch.cuda.memory_allocated())
289
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
290
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
291
+
292
+ if self.qk_normalization:
293
+ B_, H_, N_, D_ = q.shape
294
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
295
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
296
+
297
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
298
+ # attn = attn - attn.max(-1)[0].unsqueeze(-1) # in case of overflow for fp16
299
+ attn = attn.softmax(dim=-1)
300
+ attn = self.attn_drop(attn)
301
+ # print(torch.cuda.memory_allocated(), torch.cuda.memory_allocated())
302
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
303
+ # print(f"\033[31minput device {x.device} vs proj weight/bias devices {self.proj.weight.device} {self.proj.bias.device}\033[0m")
304
+ # print(f"\033[31minput dtype {x.dtype} vs proj weight/bias dtypes {self.proj.weight.dtype} {self.proj.bias.dtype}\033[0m")
305
+ x = self.proj(x)
306
+ x = self.proj_drop(x)
307
+ return x
308
+
309
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
310
+
311
+ qkv = self.qkv(x)
312
+ qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, h=self.num_heads)
313
+
314
+ if self.qk_normalization:
315
+ q, k, v = qkv.unbind(2)
316
+ if self.use_fused_rmsnorm:
317
+ q = self.q_norm(q.flatten(-2, -1))[0].view(q.shape)
318
+ k = self.k_norm(k.flatten(-2, -1))[0].view(k.shape)
319
+ else:
320
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
321
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
322
+ qkv = torch.stack([q, k, v], dim=2)
323
+
324
+ context, _ = self.inner_attn(
325
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
326
+ )
327
+ outs = self.proj(rearrange(context, "b s h d -> b s (h d)"))
328
+ outs = self.proj_drop(outs)
329
+ return outs
330
+
331
+ def forward(self, x):
332
+ x = self._naive_attn(x) if not self.use_flash_attn else self._flash_attn(x)
333
+ return x
334
+
335
+
336
+ class Mlp(nn.Module):
337
+ """ MLP as used in Vision Transformer, MLP-Mixer and related networks
338
+ """
339
+
340
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,
341
+ bias=True, drop=0.):
342
+ super().__init__()
343
+ out_features = out_features or in_features
344
+ hidden_features = hidden_features or in_features
345
+ bias = to_2tuple(bias)
346
+ drop_probs = to_2tuple(drop)
347
+
348
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])
349
+ self.act = act_layer()
350
+ self.drop1 = nn.Dropout(drop_probs[0])
351
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
352
+ self.drop2 = nn.Dropout(drop_probs[1])
353
+
354
+ def forward(self, x):
355
+ x = self.fc1(x)
356
+ x = self.act(x)
357
+ x = self.drop1(x)
358
+ x = self.fc2(x)
359
+ x = self.drop2(x)
360
+ return x
361
+
362
+
363
+ class Block(nn.Module):
364
+
365
+ def __init__(
366
+ self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., init_values=None,
367
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_flash_attn=False, use_fused_mlp=False,
368
+ fused_mlp_heuristic=1, with_cp=False, qk_normalization=False, layerscale_no_force_fp32=False,
369
+ use_fused_rmsnorm=False):
370
+ super().__init__()
371
+
372
+ self.norm1 = norm_layer(dim)
373
+ self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
374
+ use_flash_attn=use_flash_attn, causal=False, norm_layer=norm_layer,
375
+ qk_normalization=qk_normalization,
376
+ use_fused_rmsnorm=use_fused_rmsnorm)
377
+ self.ls1 = LayerScale(dim, init_values=init_values,
378
+ force_fp32=(not layerscale_no_force_fp32)) if init_values else nn.Identity()
379
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
380
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
381
+
382
+ self.norm2 = norm_layer(dim)
383
+ mlp_hidden_dim = int(dim * mlp_ratio)
384
+ if use_fused_mlp:
385
+ raise NotImplementedError
386
+ self.mlp = FusedMLP(in_features=dim, hidden_features=mlp_hidden_dim, heuristic=fused_mlp_heuristic)
387
+ else:
388
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
389
+ self.ls2 = LayerScale(dim, init_values=init_values,
390
+ force_fp32=(not layerscale_no_force_fp32)) if init_values else nn.Identity()
391
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
392
+
393
+ self.with_cp = with_cp
394
+ self.use_fused_rmsnorm = use_fused_rmsnorm
395
+
396
+ def forward(self, x, residual=None):
397
+
398
+ def _inner_forward(x, residual=None):
399
+ if self.use_fused_rmsnorm:
400
+ x, residual = self.norm1(x, residual)
401
+ x = self.drop_path1(self.ls1(self.attn(x)))
402
+ x, residual = self.norm2(x, residual)
403
+ x = self.drop_path2(self.ls2(self.mlp(x)))
404
+ return x, residual
405
+ else:
406
+ assert residual is None
407
+ x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
408
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
409
+ return x
410
+
411
+ if self.with_cp:
412
+ # print(f"\033[31m use_checkpoint \033[0m")
413
+ return checkpoint.checkpoint(_inner_forward, x, residual)
414
+ else:
415
+ return _inner_forward(x, residual=residual)
416
+
417
+
418
+ class PatchEmbed(nn.Module):
419
+ """ 3D Image to Patch Embedding
420
+ """
421
+
422
+ def __init__(
423
+ self, img_size=224, patch_size=16, in_chans=3, embed_dim=768,
424
+ num_frames=8, tubelet_size=1, norm_layer=None
425
+ ):
426
+ super().__init__()
427
+ img_size = to_2tuple(img_size)
428
+ patch_size = to_2tuple(patch_size)
429
+ self.img_size = img_size
430
+ self.patch_size = patch_size
431
+ self.grid_size = (
432
+ num_frames // tubelet_size,
433
+ img_size[0] // patch_size[0],
434
+ img_size[1] // patch_size[1]
435
+ ) # (T, H, W)
436
+ self.num_patches = self.grid_size[0] * self.grid_size[1] * self.grid_size[2]
437
+ self.num_img_patches = self.grid_size[1] * self.grid_size[2]
438
+
439
+ self.proj = nn.Conv3d(
440
+ in_channels=in_chans, out_channels=embed_dim,
441
+ kernel_size=(tubelet_size, patch_size[0], patch_size[1]),
442
+ stride=(tubelet_size, patch_size[0], patch_size[1])
443
+ )
444
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
445
+
446
+ def forward(self, x):
447
+ x = self.proj(x)
448
+ x = x.flatten(3).permute(0, 2, 3, 1) # B x C x T x HW => B x T x HW x C
449
+ x = self.norm(x)
450
+ return x
451
+
452
+
453
+ class PretrainVisionTransformer_clean(nn.Module):
454
+ def __init__(
455
+ self,
456
+ in_chans: int = 3,
457
+ patch_size: int = 14,
458
+ img_size: int = 224,
459
+ qkv_bias: bool = False, # follow internvl_clip to set False
460
+ drop_path_rate: float = 0.25, # may need ablation
461
+ embed_dim: int = 1408,
462
+ num_heads: int = 16,
463
+ mlp_ratio: float = 48/11,
464
+ init_values: float = 1e-5, # may need ablation
465
+ qk_normalization: bool = True,
466
+ depth: int = 40,
467
+ use_flash_attn: bool = True,
468
+ use_fused_rmsnorm: bool = True,
469
+ use_fused_mlp: bool = True,
470
+ fused_mlp_heuristic: int = 1,
471
+ attn_pool_num_heads: int = 16,
472
+ clip_embed_dim: int = 768,
473
+ layerscale_no_force_fp32: bool = False, # whether True for training?
474
+ num_frames: int = 8,
475
+ tubelet_size: int = 1,
476
+ sep_pos_embed: bool = False,
477
+ sep_image_video_pos_embed: bool = False,
478
+ use_checkpoint: bool = False,
479
+ checkpoint_num: int = 0,
480
+ # for unmasked teacher
481
+ x_vis_return_idx=-1,
482
+ x_vis_only=False
483
+ ):
484
+ super().__init__()
485
+
486
+ self.num_frames = num_frames
487
+ self.tubelet_size = tubelet_size
488
+ # assert use_flash_attn == use_fused_rmsnorm == use_fused_mlp, f'use_flash_attn:{use_flash_attn}, use_fused_rmsnorm{use_fused_rmsnorm} and use_fused_mlp{use_fused_mlp} should be consistent'
489
+
490
+ self.use_flash_attn = use_flash_attn
491
+ self.embed_dim = embed_dim
492
+
493
+ print(f"Original depth: {depth}")
494
+ depth = depth + x_vis_return_idx + 1
495
+ print(f"New depth: {depth}")
496
+ self.depth = depth
497
+
498
+ self.x_vis_only = x_vis_only
499
+
500
+ if use_fused_rmsnorm:
501
+ raise NotImplementedError
502
+ norm_layer_for_blocks = partial(DropoutAddRMSNorm, eps=1e-6, prenorm=True)
503
+ else:
504
+ norm_layer_for_blocks = partial(RMSNorm, eps=1e-6)
505
+ self.norm_layer_for_blocks = norm_layer_for_blocks
506
+ self.patch_embed = PatchEmbed(
507
+ img_size, patch_size, in_chans, embed_dim,
508
+ num_frames=num_frames, tubelet_size=tubelet_size,
509
+ )
510
+ num_patches = self.patch_embed.num_patches
511
+ num_img_patches = self.patch_embed.num_img_patches
512
+ # print(f"num_patches: {num_patches}, num_img_patches: {num_img_patches}")
513
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
514
+
515
+ # stolen from https://github.com/facebookresearch/mae_st/blob/dc072aaaf640d06892e23a33b42223a994efe272/models_vit.py#L65-L73C17
516
+ self.sep_pos_embed = sep_pos_embed
517
+ self.sep_image_video_pos_embed = sep_image_video_pos_embed
518
+ if sep_pos_embed:
519
+ raise NotImplementedError
520
+ else:
521
+ if sep_image_video_pos_embed:
522
+ print("Using separate position embeddings: images and videos get different pos_embed tensors.")
523
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
524
+ self.img_pos_embed = nn.Parameter(torch.zeros(1, num_img_patches + 1, embed_dim))
525
+ else:
526
+ print("Using a joint position embedding: images and videos share the same pos_embed tensor.")
527
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
528
+
529
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
530
+ # choose which layer to use checkpoint
531
+ with_cp_list = [False] * depth
532
+ if use_checkpoint:
533
+ for idx in range(depth):
534
+ if idx < checkpoint_num:
535
+ with_cp_list[idx] = True
536
+ print(f"Drop path rates: {dpr}")
537
+ print(f"Checkpoint list: {with_cp_list}")
538
+
539
+ self.blocks = nn.ModuleList([
540
+ Block(embed_dim, num_heads, mlp_ratio, qkv_bias=qkv_bias,
541
+ norm_layer=norm_layer_for_blocks,
542
+ drop_path=dpr[i], init_values=init_values, attn_drop=0.,
543
+ use_flash_attn=use_flash_attn, use_fused_mlp=use_fused_mlp,
544
+ fused_mlp_heuristic=fused_mlp_heuristic,
545
+ with_cp=with_cp_list[i],
546
+ qk_normalization=qk_normalization,
547
+ layerscale_no_force_fp32=layerscale_no_force_fp32,
548
+ use_fused_rmsnorm=use_fused_rmsnorm)
549
+ for i in range(depth)])
550
+
551
+ if not self.x_vis_only:
552
+ raise NotImplementedError
553
+
554
+ self.init_pos_embed()
555
+ trunc_normal_(self.cls_token, std=.02) # NOTE: no effect for chat; pretrained weights are always loaded over this init
556
+ self.apply(self._init_weights)
557
+ self.fix_init_weight()
558
+
559
+ def init_pos_embed(self):
560
+ print("Init pos_embed from sincos pos_embed")
561
+ if self.sep_pos_embed:
562
+ raise NotImplementedError
563
+ else:
564
+ pos_embed = get_3d_sincos_pos_embed(
565
+ self.pos_embed.shape[-1],
566
+ self.patch_embed.grid_size[1], # height & width
567
+ self.patch_embed.grid_size[0], # t_size
568
+ cls_token=True
569
+ )
570
+ self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
571
+
572
+ if self.sep_image_video_pos_embed:
573
+ img_pos_embed = get_3d_sincos_pos_embed(
574
+ self.pos_embed.shape[-1],
575
+ self.patch_embed.grid_size[1], # height & width
576
+ 1,
577
+ cls_token=True
578
+ )
579
+ self.img_pos_embed.data.copy_(torch.from_numpy(img_pos_embed).float().unsqueeze(0))
580
+
581
+
582
+ def _init_weights(self, m):
583
+ if isinstance(m, nn.Linear):
584
+ trunc_normal_(m.weight, std=.02)
585
+ if isinstance(m, nn.Linear) and m.bias is not None:
586
+ nn.init.constant_(m.bias, 0)
587
+ elif isinstance(m, nn.LayerNorm):
588
+ nn.init.constant_(m.bias, 0)
589
+ nn.init.constant_(m.weight, 1.0)
590
+
591
+ def fix_init_weight(self):
592
+ def rescale(param, layer_id):
593
+ param.div_(math.sqrt(2.0 * layer_id))
594
+
595
+ for layer_id, layer in enumerate(self.blocks):
596
+ rescale(layer.attn.proj.weight.data, layer_id + 1)
597
+ rescale(layer.mlp.fc2.weight.data, layer_id + 1)
598
+
599
+ @property
600
+ def dtype(self):
601
+ return self.patch_embed.proj.weight.dtype
602
+
603
+ def get_num_layers(self):
604
+ return len(self.blocks)
605
+
606
+ @torch.jit.ignore
607
+ def no_weight_decay(self):
608
+ return {
609
+ 'pos_embed',
610
+ 'pos_embed_spatial',
611
+ 'pos_embed_temporal',
612
+ 'pos_embed_cls',
613
+ 'img_pos_embed',
614
+ 'cls_token'
615
+ }
616
+
617
+ # @torch.cuda.amp.autocast(enabled=False)
618
+ def forward(self, x, mask=None, use_image=False):
619
+ x = self.patch_embed(x.type(self.dtype))
620
+ # print(f"x.shape: {x.shape} x.dtype: {x.dtype}, model.dtype: {self.dtype}")
621
+ B, T, L, C = x.shape # T: temporal; L: spatial
622
+ x = x.view([B, T * L, C])
623
+
624
+ # append cls token
625
+ cls_tokens = self.cls_token.expand(B, -1, -1)
626
+ x = torch.cat((cls_tokens, x), dim=1)
627
+
628
+ # add pos_embed
629
+ if self.sep_pos_embed:
630
+ raise NotImplementedError
631
+ else:
632
+ if use_image:
633
+ if self.sep_image_video_pos_embed:
634
+ pos_embed = self.img_pos_embed
635
+ else:
636
+ # (1, num_img_patches + 1, embed_dim)
637
+ # print('origin pos_embed.shape:', self.pos_embed.shape)
638
+ cls_pos_embed = self.pos_embed[:, 0:1, :]
639
+ # print('cls_pos_embed.shape:', cls_pos_embed.shape)
640
+
641
+ img_pos_embed = self.pos_embed[:, 1:, :].view(1, self.num_frames, self.patch_embed.num_patches // self.num_frames, self.embed_dim).mean(dim=1)
642
+ # print('img_pos_embed.shape:', img_pos_embed.shape)
643
+
644
+ pos_embed = torch.cat([cls_pos_embed, img_pos_embed], dim=1)
645
+ # print('final img_pos_embed.shape:', pos_embed.shape)
646
+ else:
647
+ pos_embed = self.pos_embed
648
+
649
+ # print("pos_embed.shape:", pos_embed.shape)
650
+ x = x + pos_embed
651
+
652
+ # mask tokens, ~mask means visible
653
+ if mask is not None:
654
+ x = x[~mask].reshape(B, -1, C)
655
+ else:
656
+ x = x.reshape(B, -1, C)
657
+
658
+ residual = None
659
+
660
+ for idx, blk in enumerate(self.blocks):
661
+ if isinstance(x, tuple) and len(x) == 2:
662
+ x, residual = x
663
+ x = blk(x, residual=residual)
664
+
665
+ if isinstance(x, tuple) and len(x) == 2:
666
+ x, residual = x
667
+ if residual is not None:
668
+ x = x + residual
669
+
670
+ x_vis = x
671
+ if self.x_vis_only:
672
+ return x_vis
673
+ else:
674
+ x_pool_vis = self.clip_projector(x_vis)
675
+ return x_vis, x_pool_vis, None, None
676
+
677
+
678
+
679
+
680
+
681
+ class InternVideo2ImageProcessor:
682
+ def __init__(self, image_mean=(0.485, 0.456, 0.406), image_std=(0.229, 0.224, 0.225), size=(224, 224), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
683
+ crop_size = crop_size if crop_size is not None else {"height": size[0], "width": size[1]}
684
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
685
+
686
+ self.image_mean = image_mean
687
+ self.image_std = image_std
688
+ self.size = size
689
+ self.resample = resample
690
+ self.rescale_factor = rescale_factor
691
+ self.data_format = data_format
692
+ self.crop_size = crop_size
693
+
694
+ def preprocess(self, images, return_tensors, target_size=None):
695
+ if isinstance(images, Image.Image):
696
+ images = [images]
697
+ else:
698
+ # to adapt video data
699
+ images = [to_numpy_array(image) for image in images]
700
+ assert isinstance(images, list)
701
+
702
+ if target_size is None:
703
+ target_size = self.size
704
+
705
+ transforms = [
706
+ convert_to_rgb,
707
+ to_numpy_array,
708
+ partial(resize, size=target_size, resample=self.resample, data_format=self.data_format),
709
+ partial(rescale, scale=self.rescale_factor, data_format=self.data_format),
710
+ partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format),
711
+ partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format),
712
+ ]
713
+
714
+ images = reduce(lambda x, f: [*map(f, x)], transforms, images)
715
+ data = {"pixel_values": images}
716
+
717
+ return BatchFeature(data=data, tensor_type=return_tensors)
718
+
719
+
720
+ class InternVideo2VisionConfig:
721
+ model_type = "internvideo2_vision_model"
722
+
723
+ def __init__(
724
+ self,
725
+ num_frames=4,
726
+ hidden_size=128,
727
+ num_hidden_layers=2,
728
+ num_attention_heads=16,
729
+ num_channels=3,
730
+ image_size=224,
731
+ patch_size=14,
732
+ x_vis_return_idx=-2,
733
+ sep_image_video_pos_embed=True,
734
+ use_checkpoint=False,
735
+ checkpoint_num=40,
736
+ # **kwargs,
737
+ ):
738
+ # super().__init__(**kwargs)
739
+ self.num_frames = num_frames
740
+ self.hidden_size = hidden_size
741
+ self.num_hidden_layers = num_hidden_layers
742
+ self.num_attention_heads = num_attention_heads
743
+ self.num_channels = num_channels
744
+ self.patch_size = patch_size
745
+ self.image_size = image_size
746
+ self.x_vis_return_idx = x_vis_return_idx
747
+ self.sep_image_video_pos_embed = sep_image_video_pos_embed
748
+ self.use_checkpoint = use_checkpoint
749
+ self.checkpoint_num = checkpoint_num
750
+
751
+
752
+ def build_vit(config, pt_type='origin'):
753
+
754
+ model = PretrainVisionTransformer_clean(
755
+ in_chans=config.num_channels, img_size=config.image_size, patch_size=config.patch_size,
756
+ embed_dim=config.hidden_size, depth=config.num_hidden_layers, num_heads=config.num_attention_heads, mlp_ratio=48/11,
757
+ # clip_embed_dim=config.vision_encoder.clip_embed_dim,
758
+ attn_pool_num_heads=16, qkv_bias=False,
759
+ drop_path_rate=0.25,
760
+ init_values=0.00001,
761
+ qk_normalization=True,
762
+ use_flash_attn=torch.cuda.is_available(),
763
+ use_fused_rmsnorm=False,
764
+ use_fused_mlp=False,
765
+ fused_mlp_heuristic=1,
766
+ layerscale_no_force_fp32=False,
767
+ num_frames=config.num_frames,
768
+ tubelet_size=1,
769
+ sep_pos_embed=False,
770
+ sep_image_video_pos_embed=config.sep_image_video_pos_embed,
771
+ use_checkpoint=config.use_checkpoint,
772
+ checkpoint_num=config.checkpoint_num,
773
+ x_vis_return_idx=config.x_vis_return_idx,
774
+ x_vis_only=True
775
+ )
776
+
777
+ if config.num_frames != 4:
778
+ raise NotImplementedError
779
+
780
+
781
+ return model
782
+
783
+
784
+
785
+ class InternVideo2VisionTower(nn.Module):
786
+ def __init__(self, vision_tower, vision_tower_cfg, delay_load=False, pt_type='origin', image_size=224):
787
+ super().__init__()
788
+
789
+ self.is_loaded = False
790
+ self.pt_type = pt_type
791
+
792
+ self.config = InternVideo2VisionConfig(num_frames=vision_tower_cfg.mm_local_num_frames, x_vis_return_idx=vision_tower_cfg.mm_vision_select_layer, image_size=image_size)
793
+
794
+ self.vision_tower_name = vision_tower
795
+
796
+ self.image_processor = InternVideo2ImageProcessor(size=(image_size, image_size))
797
+
798
+ if not delay_load:
799
+ print(f"Loading vision tower: {vision_tower}")
800
+ self.load_model()
801
+ elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False):
802
+ # TODO: better detector is needed.
803
+ print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
804
+ self.load_model()
805
+ elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts:
806
+ print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
807
+ self.load_model()
808
+ else:
809
+ raise NotImplementedError
810
+ self.cfg_only = self.config
811
+
812
+ def load_model(self, device_map=None):
813
+ if self.is_loaded:
814
+ print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
815
+ return
816
+
817
+ self.vision_tower = build_vit(self.config, pt_type=self.pt_type)
818
+ self.vision_tower.requires_grad_(False)
819
+
820
+ self.is_loaded = True
821
+
822
+ def forward(self, images):
823
+ if type(images) is list:
824
+ raise NotImplementedError
825
+ else:
826
+ # input: B T C H W
827
+ # output: B T*L C
828
+ T = images.shape[1]
829
+ images = images.permute(0, 2, 1, 3, 4)
830
+ image_embeds = self.vision_tower(images, use_image=(T == 1))
831
+
832
+ return image_embeds[:, 1:, :]
833
+
834
+ @property
835
+ def dummy_feature(self):
836
+ return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
837
+
838
+ @property
839
+ def dtype(self):
840
+ for p in self.vision_tower.parameters():
841
+ return p.dtype
842
+
843
+ @property
844
+ def device(self):
845
+ for p in self.vision_tower.parameters():
846
+ return p.device
847
+
848
+ @property
849
+ def hidden_size(self):
850
+ return self.config.hidden_size
851
+
852
+ @property
853
+ def num_patches(self):
854
+ return (self.config.image_size // self.config.patch_size) ** 2
855
+
856
+ @property
857
+ def num_patches_per_side(self):
858
+ return self.config.image_size // self.config.patch_size
859
+
860
+ @property
861
+ def image_size(self):
862
+ return self.config.image_size
863
+
864
+
865
+ def build_vision_tower(vision_tower_cfg, **kwargs):
866
+ vision_tower = getattr(vision_tower_cfg, "mm_vision_tower", getattr(vision_tower_cfg, "vision_tower", None))
867
+ return InternVideo2VisionTower(vision_tower, vision_tower_cfg=vision_tower_cfg, **kwargs)
868
+
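vision_tower_builder.py hides the InternVideo2 ViT behind `build_vision_tower`, which reads `mm_vision_tower`, `mm_local_num_frames`, and `mm_vision_select_layer` from the multimodal config and drops the CLS token from the tower output. The sketch below runs dummy frames through a tower built with the toy defaults of `InternVideo2VisionConfig` (hidden_size 128, 2 layers); the real sizes come from the model's config.json, so the values are illustrative only, and the flash-attention path additionally assumes flash-attn is installed when CUDA is present.

from types import SimpleNamespace
import torch

# Illustrative config attributes; in the full model these come from config.json.
cfg = SimpleNamespace(
    mm_vision_tower="internvideo2",  # placeholder name, only used for logging here
    mm_local_num_frames=4,           # build_vit currently requires num_frames == 4
    mm_vision_select_layer=-2,       # x_vis_return_idx: truncate the last transformer block
)
tower = build_vision_tower(cfg, delay_load=False, image_size=224)

frames = torch.zeros(1, 4, 3, 224, 224)  # B x T x C x H x W
if torch.cuda.is_available():
    # build_vit enables flash attention on CUDA, which expects half-precision CUDA tensors.
    tower = tower.to(device="cuda", dtype=torch.bfloat16)
frames = frames.to(device=tower.device, dtype=tower.dtype)

with torch.no_grad():
    tokens = tower(frames)  # B x (T * 16 * 16) x hidden_size, CLS token removed
print(tokens.shape)  # torch.Size([1, 1024, 128]) with the toy defaults above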
vocab.json ADDED
The diff for this file is too large to render. See raw diff