Yuan-lab committed
Commit befcde3 · verified · 1 Parent(s): 6abf97c

Upload 14 files

configuration_intern_vit.py ADDED
@@ -0,0 +1,118 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import os
8
+ from typing import Union
9
+
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
+ class InternVisionConfig(PretrainedConfig):
17
+ r"""
18
+ This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
19
+ instantiate a vision encoder according to the specified arguments, defining the model architecture.
20
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
21
+ documentation from [`PretrainedConfig`] for more information.
22
+ Args:
23
+ num_channels (`int`, *optional*, defaults to 3):
24
+ Number of color channels in the input images (e.g., 3 for RGB).
25
+ patch_size (`int`, *optional*, defaults to 14):
26
+ The size (resolution) of each patch.
27
+ image_size (`int`, *optional*, defaults to 224):
28
+ The size (resolution) of each image.
29
+ qkv_bias (`bool`, *optional*, defaults to `False`):
30
+ Whether to add a bias to the queries, keys and values in the self-attention layers.
31
+ hidden_size (`int`, *optional*, defaults to 3200):
32
+ Dimensionality of the encoder layers and the pooler layer.
33
+ num_attention_heads (`int`, *optional*, defaults to 25):
34
+ Number of attention heads for each attention layer in the Transformer encoder.
35
+ intermediate_size (`int`, *optional*, defaults to 12800):
36
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
37
+ qk_normalization (`bool`, *optional*, defaults to `True`):
38
+ Whether to normalize the queries and keys in the self-attention layers.
39
+ num_hidden_layers (`int`, *optional*, defaults to 48):
40
+ Number of hidden layers in the Transformer encoder.
41
+ use_flash_attn (`bool`, *optional*, defaults to `True`):
42
+ Whether to use flash attention mechanism.
43
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
44
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
45
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
46
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
47
+ The epsilon used by the layer normalization layers.
48
+ dropout (`float`, *optional*, defaults to 0.0):
49
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
50
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
51
+ Dropout rate for stochastic depth.
52
+ attention_dropout (`float`, *optional*, defaults to 0.0):
53
+ The dropout ratio for the attention probabilities.
54
+ initializer_range (`float`, *optional*, defaults to 0.02):
55
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
56
+ initializer_factor (`float`, *optional*, defaults to 0.1):
57
+ A factor for layer scale.
58
+ """
59
+
60
+ model_type = 'intern_vit_6b'
61
+
62
+ def __init__(
63
+ self,
64
+ num_channels=3,
65
+ patch_size=14,
66
+ image_size=224,
67
+ qkv_bias=False,
68
+ hidden_size=3200,
69
+ num_attention_heads=25,
70
+ intermediate_size=12800,
71
+ qk_normalization=True,
72
+ num_hidden_layers=48,
73
+ use_flash_attn=True,
74
+ hidden_act='gelu',
75
+ norm_type='rms_norm',
76
+ layer_norm_eps=1e-6,
77
+ dropout=0.0,
78
+ drop_path_rate=0.0,
79
+ attention_dropout=0.0,
80
+ initializer_range=0.02,
81
+ initializer_factor=0.1,
82
+ **kwargs,
83
+ ):
84
+ super().__init__(**kwargs)
85
+
86
+ self.hidden_size = hidden_size
87
+ self.intermediate_size = intermediate_size
88
+ self.dropout = dropout
89
+ self.drop_path_rate = drop_path_rate
90
+ self.num_hidden_layers = num_hidden_layers
91
+ self.num_attention_heads = num_attention_heads
92
+ self.num_channels = num_channels
93
+ self.patch_size = patch_size
94
+ self.image_size = image_size
95
+ self.initializer_range = initializer_range
96
+ self.initializer_factor = initializer_factor
97
+ self.attention_dropout = attention_dropout
98
+ self.layer_norm_eps = layer_norm_eps
99
+ self.hidden_act = hidden_act
100
+ self.norm_type = norm_type
101
+ self.qkv_bias = qkv_bias
102
+ self.qk_normalization = qk_normalization
103
+ self.use_flash_attn = use_flash_attn
104
+
105
+ @classmethod
106
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
107
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
108
+
109
+ if 'vision_config' in config_dict:
110
+ config_dict = config_dict['vision_config']
111
+
112
+ if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
113
+ logger.warning(
114
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
115
+ f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
116
+ )
117
+
118
+ return cls.from_dict(config_dict, **kwargs)
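A minimal usage sketch for this config class (not part of the uploaded files; it assumes the file above is importable from the working directory, `transformers` is installed, and the save path is purely illustrative):

```python
# Hypothetical usage sketch for InternVisionConfig; names and paths are illustrative.
from configuration_intern_vit import InternVisionConfig

config = InternVisionConfig(image_size=448, use_flash_attn=False)  # override two defaults
print(config.hidden_size, config.num_hidden_layers)                # 3200 48
config.save_pretrained('./intern_vit_config')                      # writes config.json
reloaded = InternVisionConfig.from_pretrained('./intern_vit_config')
assert reloaded.image_size == 448
```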
configuration_yuan.py ADDED
@@ -0,0 +1,46 @@
1
+
2
+ from transformers.configuration_utils import PretrainedConfig
3
+
4
+
5
+ class YuanConfig(PretrainedConfig):
6
+ model_type = "yuan2"
7
+ keys_to_ignore_at_inference = ["past_key_values"]
8
+
9
+ def __init__(
10
+ self,
11
+ vocab_size=135040,
12
+ hidden_size=2048,
13
+ ffn_hidden_size=8192,
14
+ intermediate_size=8192,
15
+ num_hidden_layers=24,
16
+ num_attention_heads=32,
17
+ hidden_act="silu",
18
+ model_max_length=8192,
19
+ initializer_range=0.02,
20
+ rms_norm_eps=1e-6,
21
+ use_cache=True,
22
+ pad_token_id=77185,
23
+ bos_token_id=77185,
24
+ eos_token_id=77185,
25
+ tie_word_embeddings=True,
26
+ **kwargs,
27
+ ):
28
+ self.vocab_size = vocab_size
29
+ self.model_max_length = model_max_length
30
+ self.hidden_size = hidden_size
31
+ self.ffn_hidden_size = ffn_hidden_size
32
+ self.intermediate_size = intermediate_size
33
+ self.num_hidden_layers = num_hidden_layers
34
+ self.num_attention_heads = num_attention_heads
35
+ self.hidden_act = hidden_act
36
+ self.initializer_range = initializer_range
37
+ self.rms_norm_eps = rms_norm_eps
38
+ self.use_cache = use_cache
39
+ super().__init__(
40
+ pad_token_id=pad_token_id,
41
+ bos_token_id=bos_token_id,
42
+ eos_token_id=eos_token_id,
43
+ tie_word_embeddings=tie_word_embeddings,
44
+ **kwargs,
45
+ )
46
+
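A quick sketch of how `YuanConfig` is constructed (illustrative only; it assumes the file above is importable and simply inspects the defaults defined in it):

```python
# Hypothetical sketch: shrink the model for a quick test and inspect the defaults.
from configuration_yuan import YuanConfig

cfg = YuanConfig(num_hidden_layers=4, hidden_size=512)
print(cfg.model_type)                                        # 'yuan2'
print(cfg.pad_token_id, cfg.bos_token_id, cfg.eos_token_id)  # 77185 77185 77185
print(cfg.tie_word_embeddings)                               # True, forwarded to PretrainedConfig
```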
configuration_yuanvl.py ADDED
@@ -0,0 +1,95 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import copy
8
+
9
+ from transformers import AutoConfig, LlamaConfig
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ from .configuration_intern_vit import InternVisionConfig
14
+ from .configuration_yuan import YuanConfig
15
+
16
+ logger = logging.get_logger(__name__)
17
+
18
+
19
+ class YuanVLChatConfig(PretrainedConfig):
20
+ model_type = 'yuanvl'
21
+ is_composition = True
22
+
23
+ def __init__(
24
+ self,
25
+ vision_config=None,
26
+ llm_config=None,
27
+ use_backbone_lora=0,
28
+ use_llm_lora=0,
29
+ select_layer=-1,
30
+ force_image_size=None,
31
+ downsample_ratio=0.5,
32
+ template=None,
33
+ dynamic_image_size=False,
34
+ use_thumbnail=False,
35
+ ps_version='v1',
36
+ min_dynamic_patch=1,
37
+ max_dynamic_patch=6,
38
+ img_context_token_id=77188,
39
+ **kwargs):
40
+ super().__init__(**kwargs)
41
+
42
+ if vision_config is None:
43
+ vision_config = {'architectures': ['InternVisionModel']}
44
+ logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
45
+
46
+ if llm_config is None:
47
+ llm_config = {'architectures': ['YuanForCausalLM']}
48
+ logger.info('llm_config is None. Initializing the YuanForCausalLM config with default values (`YuanForCausalLM`).')
49
+
50
+ self.vision_config = InternVisionConfig(**vision_config)
51
+ if llm_config.get('architectures')[0] == 'YuanForCausalLM':
52
+ self.llm_config = YuanConfig(**llm_config)
53
+ else:
54
+ raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
55
+ self.use_backbone_lora = use_backbone_lora
56
+ self.use_llm_lora = use_llm_lora
57
+ self.select_layer = select_layer
58
+ self.force_image_size = force_image_size
59
+ self.downsample_ratio = downsample_ratio
60
+ self.template = template
61
+ self.dynamic_image_size = dynamic_image_size
62
+ self.use_thumbnail = use_thumbnail
63
+ self.ps_version = ps_version # pixel shuffle version
64
+ self.min_dynamic_patch = min_dynamic_patch
65
+ self.max_dynamic_patch = max_dynamic_patch
66
+ self.img_context_token_id = img_context_token_id
67
+
68
+ logger.info(f'vision_select_layer: {self.select_layer}')
69
+ logger.info(f'ps_version: {self.ps_version}')
70
+ logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
71
+ logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
72
+
73
+ def to_dict(self):
74
+ """
75
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
76
+ Returns:
77
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
78
+ """
79
+ output = copy.deepcopy(self.__dict__)
80
+ output['vision_config'] = self.vision_config.to_dict()
81
+ output['llm_config'] = self.llm_config.to_dict()
82
+ output['model_type'] = self.__class__.model_type
83
+ output['use_backbone_lora'] = self.use_backbone_lora
84
+ output['use_llm_lora'] = self.use_llm_lora
85
+ output['select_layer'] = self.select_layer
86
+ output['force_image_size'] = self.force_image_size
87
+ output['downsample_ratio'] = self.downsample_ratio
88
+ output['template'] = self.template
89
+ output['dynamic_image_size'] = self.dynamic_image_size
90
+ output['use_thumbnail'] = self.use_thumbnail
91
+ output['ps_version'] = self.ps_version
92
+ output['min_dynamic_patch'] = self.min_dynamic_patch
93
+ output['max_dynamic_patch'] = self.max_dynamic_patch
94
+
95
+ return output
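A sketch of composing the multimodal config from its two sub-config dicts (illustrative; the overridden values shown here are assumptions for a small test, not defaults from this repo):

```python
# Hypothetical composition of YuanVLChatConfig from plain dicts.
from configuration_yuanvl import YuanVLChatConfig

cfg = YuanVLChatConfig(
    vision_config={'architectures': ['InternVisionModel'], 'image_size': 448},
    llm_config={'architectures': ['YuanForCausalLM'], 'num_hidden_layers': 24},
    force_image_size=448,
    dynamic_image_size=True,
)
d = cfg.to_dict()   # nested dicts, ready to dump as config.json
print(d['vision_config']['image_size'], d['llm_config']['num_hidden_layers'])  # 448 24
```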
conversation.py ADDED
@@ -0,0 +1,399 @@
1
+ """
2
+ Conversation prompt templates.
3
+ We kindly request that you import fastchat instead of copying this file if you wish to use it.
4
+ If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
5
+ Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
6
+ """
7
+
8
+ import dataclasses
9
+ from enum import IntEnum, auto
10
+ from typing import Dict, List, Tuple, Union
11
+
12
+
13
+ class SeparatorStyle(IntEnum):
14
+ """Separator styles."""
15
+
16
+ ADD_COLON_SINGLE = auto()
17
+ ADD_COLON_TWO = auto()
18
+ ADD_COLON_SPACE_SINGLE = auto()
19
+ NO_COLON_SINGLE = auto()
20
+ NO_COLON_TWO = auto()
21
+ ADD_NEW_LINE_SINGLE = auto()
22
+ LLAMA2 = auto()
23
+ CHATGLM = auto()
24
+ CHATML = auto()
25
+ CHATINTERN = auto()
26
+ DOLLY = auto()
27
+ RWKV = auto()
28
+ PHOENIX = auto()
29
+ ROBIN = auto()
30
+ FALCON_CHAT = auto()
31
+ CHATGLM3 = auto()
32
+ INTERNVL_ZH = auto()
33
+ MPT = auto()
34
+
35
+
36
+ @dataclasses.dataclass
37
+ class Conversation:
38
+ """A class that manages prompt templates and keeps all conversation history."""
39
+
40
+ # The name of this template
41
+ name: str
42
+ # The template of the system prompt
43
+ system_template: str = '{system_message}'
44
+ # The system message
45
+ system_message: str = ''
46
+ # The names of two roles
47
+ roles: Tuple[str] = ('USER', 'ASSISTANT')
48
+ # All messages. Each item is (role, message).
49
+ messages: List[List[str]] = ()
50
+ # The number of few shot examples
51
+ offset: int = 0
52
+ # The separator style and configurations
53
+ sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
54
+ sep: str = '\n'
55
+ sep2: str = None
56
+ # Stop criteria (the default one is EOS token)
57
+ stop_str: Union[str, List[str]] = None
58
+ # Stops generation if meeting any token in this list
59
+ stop_token_ids: List[int] = None
60
+
61
+ def get_prompt(self) -> str:
62
+ """Get the prompt for generation."""
63
+ system_prompt = self.system_template.format(system_message=self.system_message)
64
+ if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
65
+ ret = system_prompt + self.sep
66
+ for role, message in self.messages:
67
+ if message:
68
+ ret += role + ': ' + message + self.sep
69
+ else:
70
+ ret += role + ':'
71
+ return ret
72
+ elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
73
+ seps = [self.sep, self.sep2]
74
+ ret = system_prompt + seps[0]
75
+ for i, (role, message) in enumerate(self.messages):
76
+ if message:
77
+ ret += role + ': ' + message + seps[i % 2]
78
+ else:
79
+ ret += role + ':'
80
+ return ret
81
+ elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
82
+ ret = system_prompt + self.sep
83
+ for role, message in self.messages:
84
+ if message:
85
+ ret += role + ': ' + message + self.sep
86
+ else:
87
+ ret += role + ': ' # must end with a space
88
+ return ret
89
+ elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
90
+ ret = '' if system_prompt == '' else system_prompt + self.sep
91
+ for role, message in self.messages:
92
+ if message:
93
+ ret += role + '\n' + message + self.sep
94
+ else:
95
+ ret += role + '\n'
96
+ return ret
97
+ elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
98
+ ret = system_prompt
99
+ for role, message in self.messages:
100
+ if message:
101
+ ret += role + message + self.sep
102
+ else:
103
+ ret += role
104
+ return ret
105
+ elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
106
+ seps = [self.sep, self.sep2]
107
+ ret = system_prompt
108
+ for i, (role, message) in enumerate(self.messages):
109
+ if message:
110
+ ret += role + message + seps[i % 2]
111
+ else:
112
+ ret += role
113
+ return ret
114
+ elif self.sep_style == SeparatorStyle.RWKV:
115
+ ret = system_prompt
116
+ for i, (role, message) in enumerate(self.messages):
117
+ if message:
118
+ ret += (
119
+ role
120
+ + ': '
121
+ + message.replace('\r\n', '\n').replace('\n\n', '\n')
122
+ )
123
+ ret += '\n\n'
124
+ else:
125
+ ret += role + ':'
126
+ return ret
127
+ elif self.sep_style == SeparatorStyle.LLAMA2:
128
+ seps = [self.sep, self.sep2]
129
+ if self.system_message:
130
+ ret = system_prompt
131
+ else:
132
+ ret = '[INST] '
133
+ for i, (role, message) in enumerate(self.messages):
134
+ tag = self.roles[i % 2]
135
+ if message:
136
+ if i == 0:
137
+ ret += message + ' '
138
+ else:
139
+ ret += tag + ' ' + message + seps[i % 2]
140
+ else:
141
+ ret += tag
142
+ return ret
143
+ elif self.sep_style == SeparatorStyle.CHATGLM:
144
+ # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
145
+ # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
146
+ round_add_n = 1 if self.name == 'chatglm2' else 0
147
+ if system_prompt:
148
+ ret = system_prompt + self.sep
149
+ else:
150
+ ret = ''
151
+
152
+ for i, (role, message) in enumerate(self.messages):
153
+ if i % 2 == 0:
154
+ ret += f'[Round {i//2 + round_add_n}]{self.sep}'
155
+
156
+ if message:
157
+ ret += f'{role}:{message}{self.sep}'
158
+ else:
159
+ ret += f'{role}:'
160
+ return ret
161
+ elif self.sep_style == SeparatorStyle.CHATML:
162
+ ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
163
+ for role, message in self.messages:
164
+ if message:
165
+ ret += role + '\n' + message + self.sep + '\n'
166
+ else:
167
+ ret += role + '\n'
168
+ return ret
169
+ elif self.sep_style == SeparatorStyle.CHATGLM3:
170
+ ret = ''
171
+ if self.system_message:
172
+ ret += system_prompt
173
+ for role, message in self.messages:
174
+ if message:
175
+ ret += role + '\n' + ' ' + message
176
+ else:
177
+ ret += role
178
+ return ret
179
+ elif self.sep_style == SeparatorStyle.CHATINTERN:
180
+ # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
181
+ seps = [self.sep, self.sep2]
182
+ ret = system_prompt
183
+ for i, (role, message) in enumerate(self.messages):
184
+ # if i % 2 == 0:
185
+ # ret += "<s>"
186
+ if message:
187
+ ret += role + ':' + message + seps[i % 2] + '\n'
188
+ else:
189
+ ret += role + ':'
190
+ return ret
191
+ elif self.sep_style == SeparatorStyle.DOLLY:
192
+ seps = [self.sep, self.sep2]
193
+ ret = system_prompt
194
+ for i, (role, message) in enumerate(self.messages):
195
+ if message:
196
+ ret += role + ':\n' + message + seps[i % 2]
197
+ if i % 2 == 1:
198
+ ret += '\n\n'
199
+ else:
200
+ ret += role + ':\n'
201
+ return ret
202
+ elif self.sep_style == SeparatorStyle.PHOENIX:
203
+ ret = system_prompt
204
+ for role, message in self.messages:
205
+ if message:
206
+ ret += role + ': ' + '<s>' + message + '</s>'
207
+ else:
208
+ ret += role + ': ' + '<s>'
209
+ return ret
210
+ elif self.sep_style == SeparatorStyle.ROBIN:
211
+ ret = system_prompt + self.sep
212
+ for role, message in self.messages:
213
+ if message:
214
+ ret += role + ':\n' + message + self.sep
215
+ else:
216
+ ret += role + ':\n'
217
+ return ret
218
+ elif self.sep_style == SeparatorStyle.FALCON_CHAT:
219
+ ret = ''
220
+ if self.system_message:
221
+ ret += system_prompt + self.sep
222
+ for role, message in self.messages:
223
+ if message:
224
+ ret += role + ': ' + message + self.sep
225
+ else:
226
+ ret += role + ':'
227
+
228
+ return ret
229
+ elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
230
+ seps = [self.sep, self.sep2]
231
+ ret = self.system_message + seps[0]
232
+ for i, (role, message) in enumerate(self.messages):
233
+ if message:
234
+ ret += role + ': ' + message + seps[i % 2]
235
+ else:
236
+ ret += role + ':'
237
+ return ret
238
+ elif self.sep_style == SeparatorStyle.MPT:
239
+ ret = system_prompt + self.sep
240
+ for role, message in self.messages:
241
+ if message:
242
+ if type(message) is tuple:
243
+ message, _, _ = message
244
+ ret += role + message + self.sep
245
+ else:
246
+ ret += role
247
+ return ret
248
+ else:
249
+ raise ValueError(f'Invalid style: {self.sep_style}')
250
+
251
+ def set_system_message(self, system_message: str):
252
+ """Set the system message."""
253
+ self.system_message = system_message
254
+
255
+ def append_message(self, role: str, message: str):
256
+ """Append a new message."""
257
+ self.messages.append([role, message])
258
+
259
+ def update_last_message(self, message: str):
260
+ """Update the last output.
261
+ The last message is typically set to be None when constructing the prompt,
262
+ so we need to update it in-place after getting the response from a model.
263
+ """
264
+ self.messages[-1][1] = message
265
+
266
+ def to_gradio_chatbot(self):
267
+ """Convert the conversation to gradio chatbot format."""
268
+ ret = []
269
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
270
+ if i % 2 == 0:
271
+ ret.append([msg, None])
272
+ else:
273
+ ret[-1][-1] = msg
274
+ return ret
275
+
276
+ def to_openai_api_messages(self):
277
+ """Convert the conversation to OpenAI chat completion format."""
278
+ ret = [{'role': 'system', 'content': self.system_message}]
279
+
280
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
281
+ if i % 2 == 0:
282
+ ret.append({'role': 'user', 'content': msg})
283
+ else:
284
+ if msg is not None:
285
+ ret.append({'role': 'assistant', 'content': msg})
286
+ return ret
287
+
288
+ def copy(self):
289
+ return Conversation(
290
+ name=self.name,
291
+ system_template=self.system_template,
292
+ system_message=self.system_message,
293
+ roles=self.roles,
294
+ messages=[[x, y] for x, y in self.messages],
295
+ offset=self.offset,
296
+ sep_style=self.sep_style,
297
+ sep=self.sep,
298
+ sep2=self.sep2,
299
+ stop_str=self.stop_str,
300
+ stop_token_ids=self.stop_token_ids,
301
+ )
302
+
303
+ def dict(self):
304
+ return {
305
+ 'template_name': self.name,
306
+ 'system_message': self.system_message,
307
+ 'roles': self.roles,
308
+ 'messages': self.messages,
309
+ 'offset': self.offset,
310
+ }
311
+
312
+
313
+ # A global registry for all conversation templates
314
+ conv_templates: Dict[str, Conversation] = {}
315
+
316
+
317
+ def register_conv_template(template: Conversation, override: bool = False):
318
+ """Register a new conversation template."""
319
+ if not override:
320
+ assert (
321
+ template.name not in conv_templates
322
+ ), f'{template.name} has been registered.'
323
+
324
+ conv_templates[template.name] = template
325
+
326
+
327
+ def get_conv_template(name: str) -> Conversation:
328
+ """Get a conversation template."""
329
+ return conv_templates[name].copy()
330
+
331
+
332
+ # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
333
+ # is that during training, the preprocessing function for the Hermes-2 template doesn't add
334
+ # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
335
+ # Therefore, they are completely equivalent during inference.
336
+ register_conv_template(
337
+ Conversation(
338
+ name='Hermes-2',
339
+ system_template='<|im_start|>system\n{system_message}',
340
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
341
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
342
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
343
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
344
+ sep_style=SeparatorStyle.MPT,
345
+ sep='<|im_end|>',
346
+ stop_str='<|endoftext|>',
347
+ )
348
+ )
349
+
350
+
351
+ register_conv_template(
352
+ Conversation(
353
+ name='internlm2-chat',
354
+ system_template='<|im_start|>system\n{system_message}',
355
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
356
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
357
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
358
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
359
+ sep_style=SeparatorStyle.MPT,
360
+ sep='<|im_end|>',
361
+ )
362
+ )
363
+
364
+
365
+ register_conv_template(
366
+ Conversation(
367
+ name='phi3-chat',
368
+ system_template='<|system|>\n{system_message}',
369
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
370
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
371
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
372
+ roles=('<|user|>\n', '<|assistant|>\n'),
373
+ sep_style=SeparatorStyle.MPT,
374
+ sep='<|end|>',
375
+ )
376
+ )
377
+
378
+
379
+ register_conv_template(
380
+ Conversation(
381
+ name='internvl2_5',
382
+ system_template='<|im_start|>system\n{system_message}',
383
+ system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
384
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
385
+ sep_style=SeparatorStyle.MPT,
386
+ sep='<|im_end|>\n',
387
+ )
388
+ )
389
+
390
+ register_conv_template(
391
+ Conversation(
392
+ name='yuan-chat',
393
+ system_template='<|im_start|>system\n{system_message}',
394
+ system_message='你是IEI-源多模态模型,英文名是YuanVL,是由浪潮信息开发的多模态大语言模型。',
395
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
396
+ sep_style=SeparatorStyle.MPT,
397
+ sep='<|im_end|>\n',
398
+ )
399
+ )
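A sketch of how the registered 'yuan-chat' template assembles a prompt (illustrative; it assumes the file above is importable, and the user message is made up):

```python
# Hypothetical prompt construction with the MPT-style 'yuan-chat' template.
from conversation import get_conv_template

conv = get_conv_template('yuan-chat')
conv.append_message(conv.roles[0], 'Describe the image in detail.')
conv.append_message(conv.roles[1], None)   # leave the assistant turn open for generation
print(conv.get_prompt())
# <|im_start|>system
# ...system message...<|im_end|>
# <|im_start|>user
# Describe the image in detail.<|im_end|>
# <|im_start|>assistant
```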
flash_attention.py ADDED
@@ -0,0 +1,76 @@
1
+ # https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
2
+ import torch
3
+ import torch.nn as nn
4
+ from einops import rearrange
5
+
6
+ try: # v1
7
+ from flash_attn.flash_attn_interface import \
8
+ flash_attn_unpadded_qkvpacked_func
9
+ except ImportError: # v2
10
+ from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
11
+
12
+ from flash_attn.bert_padding import pad_input, unpad_input
13
+
14
+
15
+ class FlashAttention(nn.Module):
16
+ """Implement the scaled dot product attention with softmax.
17
+ Arguments
18
+ ---------
19
+ softmax_scale: The temperature to use for the softmax attention.
20
+ (default: 1/sqrt(d_keys) where d_keys is computed at
21
+ runtime)
22
+ attention_dropout: The dropout rate to apply to the attention
23
+ (default: 0.0)
24
+ """
25
+
26
+ def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
27
+ super().__init__()
28
+ self.softmax_scale = softmax_scale
29
+ self.dropout_p = attention_dropout
30
+
31
+ def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
32
+ max_s=None, need_weights=False):
33
+ """Implements the multihead softmax attention.
34
+ Arguments
35
+ ---------
36
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
37
+ if unpadded: (nnz, 3, h, d)
38
+ key_padding_mask: a bool tensor of shape (B, S)
39
+ """
40
+ assert not need_weights
41
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
42
+ assert qkv.is_cuda
43
+
44
+ if cu_seqlens is None:
45
+ batch_size = qkv.shape[0]
46
+ seqlen = qkv.shape[1]
47
+ if key_padding_mask is None:
48
+ qkv = rearrange(qkv, 'b s ... -> (b s) ...')
49
+ max_s = seqlen
50
+ cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
51
+ device=qkv.device)
52
+ output = flash_attn_unpadded_qkvpacked_func(
53
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
54
+ softmax_scale=self.softmax_scale, causal=causal
55
+ )
56
+ output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
57
+ else:
58
+ nheads = qkv.shape[-2]
59
+ x = rearrange(qkv, 'b s three h d -> b s (three h d)')
60
+ x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
61
+ x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
62
+ output_unpad = flash_attn_unpadded_qkvpacked_func(
63
+ x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
64
+ softmax_scale=self.softmax_scale, causal=causal
65
+ )
66
+ output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
67
+ indices, batch_size, seqlen),
68
+ 'b s (h d) -> b s h d', h=nheads)
69
+ else:
70
+ assert max_s is not None
71
+ output = flash_attn_unpadded_qkvpacked_func(
72
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
73
+ softmax_scale=self.softmax_scale, causal=causal
74
+ )
75
+
76
+ return output, None
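A sketch of the packed-QKV calling convention used by `FlashAttention.forward` (illustrative; it assumes a CUDA device and an installed flash-attn package, and the tensor sizes are made up):

```python
# Hypothetical call: padded (B, S, 3, H, D) float16 input, no padding mask.
import torch
from flash_attention import FlashAttention

attn = FlashAttention(attention_dropout=0.0).cuda().eval()
B, S, H, D = 2, 196, 16, 64
qkv = torch.randn(B, S, 3, H, D, dtype=torch.float16, device='cuda')
with torch.no_grad():
    out, _ = attn(qkv, key_padding_mask=None, causal=False)
print(out.shape)   # torch.Size([2, 196, 16, 64])
```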
modeling_intern_vit.py ADDED
@@ -0,0 +1,367 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2023 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ from typing import Optional, Tuple, Union
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import torch.utils.checkpoint
11
+ from einops import rearrange
12
+ from timm.models.layers import DropPath
13
+ from torch import nn
14
+ from transformers.activations import ACT2FN
15
+ from transformers.modeling_outputs import (BaseModelOutput,
16
+ BaseModelOutputWithPooling)
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.utils import logging
19
+
20
+ from .configuration_intern_vit import InternVisionConfig
21
+
22
+ try:
23
+ from flash_attention import FlashAttention
24
+ has_flash_attn = True
25
+ except ImportError:
26
+ print('FlashAttention is not installed.')
27
+ has_flash_attn = False
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class InternRMSNorm(nn.Module):
34
+ def __init__(self, hidden_size, eps=1e-6):
35
+ super().__init__()
36
+ self.weight = nn.Parameter(torch.ones(hidden_size))
37
+ self.variance_epsilon = eps
38
+
39
+ def forward(self, hidden_states):
40
+ input_dtype = hidden_states.dtype
41
+ hidden_states = hidden_states.to(torch.float32)
42
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
43
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
44
+ return self.weight * hidden_states.to(input_dtype)
45
+
46
+
47
+ try:
48
+ from apex.normalization import FusedRMSNorm
49
+
50
+ InternRMSNorm = FusedRMSNorm # noqa
51
+
52
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
53
+ except ImportError:
54
+ # using the normal InternRMSNorm
55
+ pass
56
+ except Exception:
57
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
58
+ pass
59
+
60
+
61
+ NORM2FN = {
62
+ 'rms_norm': InternRMSNorm,
63
+ 'layer_norm': nn.LayerNorm,
64
+ }
65
+
66
+
67
+ class InternVisionEmbeddings(nn.Module):
68
+ def __init__(self, config: InternVisionConfig):
69
+ super().__init__()
70
+ self.config = config
71
+ self.embed_dim = config.hidden_size
72
+ self.image_size = config.image_size
73
+ self.patch_size = config.patch_size
74
+
75
+ self.class_embedding = nn.Parameter(
76
+ torch.randn(1, 1, self.embed_dim),
77
+ )
78
+
79
+ self.patch_embedding = nn.Conv2d(
80
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
81
+ )
82
+
83
+ self.num_patches = (self.image_size // self.patch_size) ** 2
84
+ self.num_positions = self.num_patches + 1
85
+
86
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
87
+
88
+ def _get_pos_embed(self, pos_embed, H, W):
89
+ target_dtype = pos_embed.dtype
90
+ pos_embed = pos_embed.float().reshape(
91
+ 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
92
+ pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
93
+ reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
94
+ return pos_embed
95
+
96
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
97
+ target_dtype = self.patch_embedding.weight.dtype
98
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
99
+ batch_size, _, height, width = patch_embeds.shape
100
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
101
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
102
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
103
+ position_embedding = torch.cat([
104
+ self.position_embedding[:, :1, :],
105
+ self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
106
+ ], dim=1)
107
+ embeddings = embeddings + position_embedding.to(target_dtype)
108
+ return embeddings
109
+
110
+
111
+ class InternAttention(nn.Module):
112
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
113
+
114
+ def __init__(self, config: InternVisionConfig):
115
+ super().__init__()
116
+ self.config = config
117
+ self.embed_dim = config.hidden_size
118
+ self.num_heads = config.num_attention_heads
119
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
120
+ self.use_flash_attn = True # modify
121
+ if config.use_flash_attn and not has_flash_attn:
122
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
123
+ self.head_dim = self.embed_dim // self.num_heads
124
+ if self.head_dim * self.num_heads != self.embed_dim:
125
+ raise ValueError(
126
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
127
+ f' {self.num_heads}).'
128
+ )
129
+
130
+ self.scale = self.head_dim ** -0.5
131
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
132
+ self.attn_drop = nn.Dropout(config.attention_dropout)
133
+ self.proj_drop = nn.Dropout(config.dropout)
134
+
135
+ self.qk_normalization = config.qk_normalization
136
+
137
+ if self.qk_normalization:
138
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
139
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
140
+
141
+ if self.use_flash_attn:
142
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
143
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
144
+
145
+ def _naive_attn(self, x):
146
+ B, N, C = x.shape
147
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
148
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
149
+
150
+ if self.qk_normalization:
151
+ B_, H_, N_, D_ = q.shape
152
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
153
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
154
+
155
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
156
+ attn = attn.softmax(dim=-1)
157
+ attn = self.attn_drop(attn)
158
+
159
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
160
+ x = self.proj(x)
161
+ x = self.proj_drop(x)
162
+ return x
163
+
164
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
165
+ qkv = self.qkv(x)
166
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
167
+
168
+ if self.qk_normalization:
169
+ q, k, v = qkv.unbind(2)
170
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
171
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
172
+ qkv = torch.stack([q, k, v], dim=2)
173
+
174
+ context, _ = self.inner_attn(
175
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
176
+ )
177
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
178
+ outs = self.proj_drop(outs)
179
+ return outs
180
+
181
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
182
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
183
+ return x
184
+
185
+
186
+ class InternMLP(nn.Module):
187
+ def __init__(self, config: InternVisionConfig):
188
+ super().__init__()
189
+ self.config = config
190
+ self.act = ACT2FN[config.hidden_act]
191
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
192
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
193
+
194
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
195
+ hidden_states = self.fc1(hidden_states)
196
+ hidden_states = self.act(hidden_states)
197
+ hidden_states = self.fc2(hidden_states)
198
+ return hidden_states
199
+
200
+
201
+ class InternVisionEncoderLayer(nn.Module):
202
+ def __init__(self, config: InternVisionConfig, drop_path_rate: float):
203
+ super().__init__()
204
+ self.embed_dim = config.hidden_size
205
+ self.intermediate_size = config.intermediate_size
206
+ self.norm_type = config.norm_type
207
+
208
+ self.attn = InternAttention(config)
209
+ self.mlp = InternMLP(config)
210
+ self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
211
+ self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
212
+
213
+ self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
214
+ self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
215
+ self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
216
+ self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
217
+
218
+ def forward(
219
+ self,
220
+ hidden_states: torch.Tensor,
221
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
222
+ """
223
+ Args:
224
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
225
+ """
226
+
227
+ hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
228
+
229
+ hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
230
+
231
+ return hidden_states
232
+
233
+
234
+ class InternVisionEncoder(nn.Module):
235
+ """
236
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
237
+ [`InternEncoderLayer`].
238
+
239
+ Args:
240
+ config (`InternConfig`):
241
+ The corresponding vision configuration for the `InternEncoder`.
242
+ """
243
+
244
+ def __init__(self, config: InternVisionConfig):
245
+ super().__init__()
246
+ self.config = config
247
+ # stochastic depth decay rule
248
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
249
+ self.layers = nn.ModuleList([
250
+ InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
251
+ self.gradient_checkpointing = True
252
+
253
+ def forward(
254
+ self,
255
+ inputs_embeds,
256
+ output_hidden_states: Optional[bool] = None,
257
+ return_dict: Optional[bool] = None,
258
+ ) -> Union[Tuple, BaseModelOutput]:
259
+ r"""
260
+ Args:
261
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
262
+ Embedded representation of the inputs. Should be float, not int tokens.
263
+ output_hidden_states (`bool`, *optional*):
264
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
265
+ for more detail.
266
+ return_dict (`bool`, *optional*):
267
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
268
+ """
269
+ output_hidden_states = (
270
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
271
+ )
272
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
273
+
274
+ encoder_states = () if output_hidden_states else None
275
+ hidden_states = inputs_embeds
276
+
277
+ for idx, encoder_layer in enumerate(self.layers):
278
+ if output_hidden_states:
279
+ encoder_states = encoder_states + (hidden_states,)
280
+ if self.gradient_checkpointing and self.training:
281
+ layer_outputs = torch.utils.checkpoint.checkpoint(
282
+ encoder_layer,
283
+ hidden_states)
284
+ else:
285
+ layer_outputs = encoder_layer(
286
+ hidden_states,
287
+ )
288
+ hidden_states = layer_outputs
289
+ #import pdb
290
+ #pdb.set_trace()
291
+
292
+ if output_hidden_states:
293
+ encoder_states = encoder_states + (hidden_states,)
294
+
295
+ if not return_dict:
296
+ return tuple(v for v in [hidden_states, encoder_states] if v is not None)
297
+ return BaseModelOutput(
298
+ last_hidden_state=hidden_states, hidden_states=encoder_states
299
+ )
300
+
301
+
302
+ class InternVisionModel(PreTrainedModel):
303
+ main_input_name = 'pixel_values'
304
+ config_class = InternVisionConfig
305
+ _no_split_modules = ['InternVisionEncoderLayer']
306
+
307
+ def __init__(self, config: InternVisionConfig):
308
+ super().__init__(config)
309
+ self.config = config
310
+
311
+ self.embeddings = InternVisionEmbeddings(config)
312
+ self.encoder = InternVisionEncoder(config)
313
+
314
+ def resize_pos_embeddings(self, old_size, new_size, patch_size):
315
+ pos_emb = self.embeddings.position_embedding
316
+ _, num_positions, embed_dim = pos_emb.shape
317
+ cls_emb = pos_emb[:, :1, :]
318
+ pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
319
+ pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
320
+ pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
321
+ pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
322
+ self.embeddings.position_embedding = nn.Parameter(pos_emb)
323
+ self.embeddings.image_size = new_size
324
+ logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
325
+
326
+ def get_input_embeddings(self):
327
+ return self.embeddings
328
+
329
+ def forward(
330
+ self,
331
+ pixel_values: Optional[torch.FloatTensor] = None,
332
+ output_hidden_states: Optional[bool] = None,
333
+ return_dict: Optional[bool] = None,
334
+ pixel_embeds: Optional[torch.FloatTensor] = None,
335
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
336
+ output_hidden_states = (
337
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
338
+ )
339
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
340
+
341
+ if pixel_values is None and pixel_embeds is None:
342
+ raise ValueError('You have to specify pixel_values or pixel_embeds')
343
+
344
+ if pixel_embeds is not None:
345
+ hidden_states = pixel_embeds
346
+ else:
347
+ if len(pixel_values.shape) == 4:
348
+ hidden_states = self.embeddings(pixel_values)
349
+ else:
350
+ raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
351
+ encoder_outputs = self.encoder(
352
+ inputs_embeds=hidden_states,
353
+ output_hidden_states=output_hidden_states,
354
+ return_dict=return_dict,
355
+ )
356
+ last_hidden_state = encoder_outputs.last_hidden_state
357
+ pooled_output = last_hidden_state[:, 0, :]
358
+
359
+ if not return_dict:
360
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
361
+
362
+ return BaseModelOutputWithPooling(
363
+ last_hidden_state=last_hidden_state,
364
+ pooler_output=pooled_output,
365
+ hidden_states=encoder_outputs.hidden_states,
366
+ attentions=encoder_outputs.attentions,
367
+ )
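A sketch of a forward pass through a deliberately tiny `InternVisionModel` (illustrative; since `InternAttention` hard-codes `use_flash_attn = True`, this assumes a CUDA device with flash-attn installed, plus the timm and einops dependencies imported at the top of the file; the reduced sizes are made up for a smoke test):

```python
# Hypothetical smoke test with a reduced configuration.
import torch
from configuration_intern_vit import InternVisionConfig
from modeling_intern_vit import InternVisionModel

config = InternVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128,
                            num_hidden_layers=2, image_size=224, patch_size=14)
model = InternVisionModel(config).cuda().to(torch.float16).eval()
pixel_values = torch.randn(1, 3, 224, 224, dtype=torch.float16, device='cuda')
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.last_hidden_state.shape)   # (1, 1 + (224 // 14) ** 2, 64) == (1, 257, 64)
```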
modeling_yuanlm2.py ADDED
@@ -0,0 +1,1639 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Yuan model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+ import torch.nn.functional as F
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import einsum, nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+ from transformers.activations import ACT2FN
29
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
30
+ from transformers.modeling_utils import PreTrainedModel
31
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
32
+ from .configuration_yuan import YuanConfig
33
+ from einops import rearrange
34
+ # from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
35
+ #from apex.normalization import MixedFusedRMSNorm as RMSNorm
36
+ #from flash_attn import flash_attn_func
37
+ from transformer_engine.pytorch import RMSNorm
38
+ import pdb
39
+ import copy
40
+ try:
41
+ import grouped_gemm as gg
42
+ except ImportError:
43
+ gg = None
44
+ try:
45
+ from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
46
+ from flash_attn import flash_attn_func
47
+ except ImportError:
48
+ flash_attn_unpadded_func = None
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CONFIG_FOR_DOC = "YuanConfig"
54
+
55
+ """
56
+ class YuanRotaryEmbedding(nn.Module):
57
+ def __init__(self, dim, base=10000, dtype=torch.float32, device=None, scaling_factor=1.0, rope_type='default'):
58
+ super().__init__()
59
+ inv_freq = (1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))).to(dtype)#.to('cuda:1')
60
+ self.register_buffer('inv_freq', inv_freq)
61
+
62
+ def forward(self, max_seq_len, offset=0):
63
+ self.inv_freq = self.inv_freq.to(torch.float32)
64
+ seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
65
+ freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
66
+ # first part even vector components, second part odd vector components,
67
+ # 2 * dim in dimension size
68
+ emb = torch.cat((freqs, freqs), dim=-1)
69
+ # emb [seq_length, .., dim]
70
+ return emb[:, None, None, :]"""
71
+
72
+ class YuanRotaryEmbedding(nn.Module):
73
+ def __init__(self, dim, base=10000, dtype=torch.float32, rotary_interleaved=False, seq_len_interpolation_factor=None):
74
+ super().__init__()
75
+ self.base = base
76
+ self.dim = dim
77
+ self.rotary_interleaved = rotary_interleaved
78
+ self.seq_len_interpolation_factor = seq_len_interpolation_factor
79
+
80
+ def get_rotary_seq_len(
81
+ self,
82
+ inference_param=None,
83
+ transformer_input: torch.Tensor=None,
84
+ transformer_config=None,
85
+ ):
86
+ if inference_param is not None:
87
+ rotary_seq_len = inference_param.max_sequence_length
88
+ else:
89
+ rotary_seq_len = transformer_input.size(0)
90
+ if transformer_config.sequence_parallel:
91
+ rotary_seq_len *= transformer_config.tensor_model_parallel_size
92
+
93
+ return rotary_seq_len
94
+
95
+ def forward(self, max_seq_len, offset=0):
96
+
97
+ """Forward pass of RoPE embedding.
98
+
99
+ Args:
100
+ max_seq_len (int): Maximum size of sequence
101
+ offset (int, optional): _description_. Defaults to 0.
102
+
103
+ Returns:
104
+ Tensor: Embeddings after applying RoPE.
105
+ """
106
+ inv_freq = (1.0 / ( self.base**(torch.arange(0, self.dim, 2, dtype=torch.float32, device=torch.cuda.current_device()) / self.dim))).to(torch.float32)
107
+
108
+ #max_seq_len_int = max_seq_len.item() if max_seq_len.numel() == 1 else max_seq_len.max().item()
109
+ seq = (
110
+ torch.arange(max_seq_len, device=inv_freq.device, dtype=inv_freq.dtype)
111
+ + offset
112
+ )
113
+
114
+ if self.seq_len_interpolation_factor is not None:
115
+ seq *= 1 / self.seq_len_interpolation_factor
116
+
117
+ freqs = torch.outer(seq, inv_freq)
118
+ # first part even vector components, second part odd vector components,
119
+ # 2 * dim in dimension size
120
+ if not self.rotary_interleaved:
121
+ emb = torch.cat((freqs, freqs), dim=-1)
122
+ else:
123
+ emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view(
124
+ freqs.shape[0], -1
125
+ )
126
+ # emb [seq_length, .., dim]
127
+ emb = emb[:, None, None, :]
128
+ #emb = emb[:, None, :]
129
+ return emb
130
+
131
+
132
+ def _rotate_half(x, rotary_interleaved):
133
+ """huggingface version
134
+ change sign so the last dimension becomes [-odd, +even]
135
+
136
+ x1, x2 = torch.chunk(x, 2, dim=-1)
137
+ return torch.cat((-x2, x1), dim=-1)
138
+ """
139
+ if not rotary_interleaved:
140
+ x1, x2 = torch.chunk(x, 2, dim=-1)
141
+ return torch.cat((-x2, x1), dim=-1)
142
+ else:
143
+ x1 = x[:, :, :, ::2]
144
+ x2 = x[:, :, :, 1::2]
145
+ x_new = torch.stack((-x2, x1), dim=-1)
146
+ return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1)
147
+
148
+ def apply_rotary_pos_emb(t, freqs, position_ids, rotary_interleaved=False):
149
+
150
+ rot_dim = freqs.shape[-1]
151
+ #if position_ids.shape[1] > 1:
152
+ freqs = freqs[position_ids]
153
+ freqs = freqs.view(t.shape[1],freqs.shape[1],freqs.shape[2],freqs.shape[4]).transpose(0,1)
154
+ # ideally t_pass is empty so rotary pos embedding is applied to all tensor t
155
+ t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
156
+
157
+ # first part is cosine component
158
+ # second part is sine component, need to change signs with _rotate_half method
159
+ t_type = t.dtype
160
+ cos_ = torch.cos(freqs).to(t.dtype)
161
+ sin_ = torch.sin(freqs).to(t.dtype)
162
+
163
+ t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_)
164
+ return torch.cat((t, t_pass), dim=-1)
165
+ """huggingface version
166
+ input tensor t is of shape [seq_length, ..., dim]
167
+ rotary positional embeding tensor freqs is of shape [seq_length, ..., dim]
168
+ check https://kexue.fm/archives/8265 for detailed formulas
169
+
170
+ dtype = t.dtype
171
+ rot_dim = freqs.shape[-1]
172
+ t_pass = t[..., rot_dim:]
173
+ if position_ids.shape[1] > 1:
174
+ freqs = freqs[position_ids]
175
+ freqs = freqs.view(t.shape[1],freqs.shape[1],freqs.shape[2],freqs.shape[4]).transpose(0,1)
176
+ # ideally t_pass is empty so rotary pos embedding is applied to all tensor t
177
+ t = t[..., :rot_dim]
178
+ # first part is cosine component
179
+ # second part is sine component, need to change signs with _rotate_half method
180
+ t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
181
+ t = t.to(dtype)
182
+ """
183
+
184
+ return torch.cat((t, t_pass), dim=-1)
185
+
186
+ class LocalizedFiltering(torch.nn.Module):
187
+ """
188
+ Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
189
+ variable names and moving away from the stateful representation of incremental decoding state. See
190
+ "https://arxiv.org/abs/2209.10655" for more details.
191
+ """
192
+
193
+ def __init__(self, hidden_size, lf_conv2d_group, lf_conv2d_num_pad):
194
+ super().__init__()
195
+
196
+ self.embed_dim = hidden_size
197
+ self.lf_conv2d_group = lf_conv2d_group
198
+ self.lf_conv2d_num_pad = lf_conv2d_num_pad
199
+ if self.lf_conv2d_num_pad == 1:
200
+ self.training = True
201
+ self.conv1 = torch.nn.Conv2d(self.embed_dim, self.embed_dim // 2, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group)
202
+ self.conv2 = torch.nn.Conv2d(self.embed_dim // 2, self.embed_dim, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group)
203
+ self.output_layernorm = RMSNorm(self.embed_dim, eps=1e-6)
204
+
205
+ def _train_forward(self, inputs):
206
+ inputs = inputs.transpose(0,1)
207
+ seq_len, bsz, embed_dim = inputs.size()
208
+ if embed_dim != self.embed_dim:
209
+ raise ValueError(
210
+ f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
211
+ )
212
+ residual = inputs
213
+
214
+ inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
215
+ output1 = self.conv1(inputs)
216
+ output1 = output1[:, :, :seq_len, :]
217
+
218
+ output2 = self.conv2(output1)
219
+ output2 = output2[:, :, :seq_len, :].permute(2, 3, 0, 1).contiguous()
220
+ output2 = output2.view(seq_len, bsz, embed_dim)
221
+ assert output2.shape == residual.shape
222
+
223
+ torch.cuda.set_device(output2.device)
224
+ lf_output = self.output_layernorm(output2 + residual)
225
+ lf_output = lf_output.transpose(0,1)
226
+ return lf_output
227
+
228
+ def _inference_forward(self, inputs, before_hidden_states):
229
+
230
+ if before_hidden_states is None:
231
+ residual = inputs
232
+ seq_len, bsz, embed_dim = inputs.size()
233
+
234
+ inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
235
+
236
+ pad_zero1 = torch.zeros(bsz, embed_dim, 1, 1).to(inputs)
237
+ inputs = torch.cat((pad_zero1, inputs), dim=2).contiguous()
238
+ output1 = self.conv1(inputs)
239
+
240
+ pad_zero2 = torch.zeros(bsz, embed_dim // 2, 1, 1).to(output1)
241
+ output1 = torch.cat((pad_zero2, output1), dim=2).contiguous()
242
+ output2 = self.conv2(output1)
243
+
244
+ output2 = output2.permute(2, 3, 0, 1).contiguous()
245
+
246
+ output2 = output2.view(seq_len, bsz, embed_dim)
247
+
248
+ assert output2.shape == residual.shape
249
+
250
+ lf_output = self.output_layernorm(output2 + residual)
251
+
252
+ else:
253
+ residual = inputs
254
+
255
+ seq_len, bsz, embed_dim = inputs.size()
256
+ seq_len_before, _, _ = before_hidden_states.size()
257
+
258
+ assert seq_len == 1 and seq_len_before == 2
259
+
260
+ inputs = torch.cat((before_hidden_states, inputs), dim=0)
261
+ inputs = inputs.view(3, 1, bsz, embed_dim).permute(2, 3, 0, 1)
262
+
263
+ output1 = self.conv1(inputs)
264
+ output2 = self.conv2(output1)
265
+ output2 = output2.view(1, bsz, embed_dim)
266
+
267
+ assert output2.shape == residual.shape
268
+
269
+ lf_output = self.output_layernorm(output2 + residual)
270
+
271
+ return lf_output
272
+ '''#IEIyuan huggingface version
273
+ if before_hidden_states == None:
274
+ inputs = inputs.transpose(0,1)
275
+ seq_len, bsz, embed_dim = inputs.size()
276
+ if embed_dim != self.embed_dim:
277
+ raise ValueError(
278
+ f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
279
+ )
280
+ residual = inputs
281
+ inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
282
+ inputs = torch.cat((torch.zeros(bsz, embed_dim, 1, 1, dtype=inputs.dtype, device=inputs.device), inputs), dim=2).contiguous()
283
+ output1 = self.conv1(inputs)
284
+
285
+ output1 = torch.cat((torch.zeros(bsz, embed_dim // 2, 1, 1, dtype=inputs.dtype, device=inputs.device), output1), dim=2).contiguous()
286
+ output2 = self.conv2(output1).permute(2, 3, 0, 1).contiguous()
287
+ output2 = output2.view(seq_len, bsz, embed_dim)
288
+ assert output2.shape == residual.shape
289
+ norm_input = (output2 + residual)#.to('cuda:0')
290
+ torch.cuda.set_device(norm_input.device)
291
+ lf_output = self.output_layernorm(norm_input)
292
+ lf_output = lf_output#.to('cuda:1')
293
+ lf_output = lf_output.transpose(0,1)
294
+ return lf_output
295
+ else:
296
+ inputs = inputs.transpose(0,1)
297
+ before_hidden_states = before_hidden_states.transpose(0,1)
298
+ seq_len, bsz, embed_dim = inputs.size()
299
+ if embed_dim != self.embed_dim:
300
+ raise ValueError(
301
+ f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
302
+ )
303
+ residual = inputs
304
+ inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
305
+ before_hidden_states = before_hidden_states.view(2, 1, bsz, embed_dim).permute(2, 3, 0, 1)
306
+ inputs = torch.cat((before_hidden_states, inputs), dim=2).contiguous()
307
+ output1 = self.conv1(inputs)
308
+ output2 = self.conv2(output1).permute(2, 3, 0, 1).contiguous()
309
+ output2 = output2.view(seq_len, bsz, embed_dim)
310
+ assert output2.shape == residual.shape
311
+
312
+ norm_input = (output2 + residual)#.to('cuda:0')
313
+ torch.cuda.set_device(norm_input.device)
314
+ lf_output = self.output_layernorm(norm_input)
315
+ lf_output = lf_output#.to('cuda:1')
316
+ lf_output = lf_output.transpose(0,1)
317
+ return lf_output
318
+ '''
319
+
320
+
321
+ def forward(
322
+ self,
323
+ inputs,
324
+ before_hidden_states = None,
325
+ ) -> torch.Tensor:
326
+ # assert self.lf_conv2d_num_pad == 1
327
+ if self.training:
328
+ lf_output = self._train_forward(inputs)
329
+ else:
330
+ lf_output = self._inference_forward(inputs, before_hidden_states)
331
+
332
+ return lf_output
333
+
334
+
335
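The training and inference branches of LocalizedFiltering differ only in how the causal left padding for the two stacked Conv2d filters is supplied: the training path pads and truncates back to seq_len, while incremental decoding prepends the two cached hidden states so a single new token sees the same receptive field. The sketch below illustrates that equivalence with plain ungrouped convolutions and no normalization; the grouping, padding mode and RMSNorm of the real module are intentionally omitted.

import torch

torch.manual_seed(0)
embed_dim, bsz = 4, 1

# Two stacked causal filters over the sequence axis (kernel size 2, no internal
# padding), standing in for conv1/conv2 of LocalizedFiltering without groups or norm.
conv1 = torch.nn.Conv2d(embed_dim, embed_dim // 2, (2, 1))
conv2 = torch.nn.Conv2d(embed_dim // 2, embed_dim, (2, 1))

def filter_full(x):  # x: [seq, bsz, dim] -> [seq, bsz, dim]
    seq_len = x.shape[0]
    h = x.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)                   # [B, C, S, 1]
    h = conv1(torch.cat((torch.zeros(bsz, embed_dim, 1, 1), h), dim=2))          # causal pad
    h = conv2(torch.cat((torch.zeros(bsz, embed_dim // 2, 1, 1), h), dim=2))     # causal pad
    return h.permute(2, 3, 0, 1).reshape(seq_len, bsz, embed_dim)

x = torch.randn(5, bsz, embed_dim)
full = filter_full(x)

# Incremental step: the newest token's filtered output only needs the two previous
# hidden states, which is what inference_hidden_states_memory caches.
window = torch.cat((x[-3:-1], x[-1:]), dim=0).view(3, 1, bsz, embed_dim).permute(2, 3, 0, 1)
step = conv2(conv1(window)).permute(2, 3, 0, 1).reshape(1, bsz, embed_dim)
assert torch.allclose(step, full[-1:], atol=1e-5)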
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
336
+ def _make_causal_mask(
337
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
338
+ ):
339
+ """
340
+ Make causal mask used for bi-directional self-attention.
341
+ """
342
+ bsz, tgt_len = input_ids_shape
343
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
344
+ mask_cond = torch.arange(mask.size(-1), device=device)
345
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
346
+ mask = mask.to(dtype)
347
+
348
+ if past_key_values_length > 0:
349
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
350
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
351
+
352
+
353
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
354
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
355
+ """
356
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
357
+ """
358
+ bsz, src_len = mask.size()
359
+ tgt_len = tgt_len if tgt_len is not None else src_len
360
+
361
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
362
+
363
+ inverted_mask = 1.0 - expanded_mask
364
+
365
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
366
+
367
+
368
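_make_causal_mask and _expand_mask both return additive masks in the activation dtype, so the decoder later combines them by simple addition: any position that is in the future or padded ends up near the dtype minimum and is suppressed by the softmax. A small sketch with hypothetical shapes:

import torch

dtype = torch.float32
bsz, tgt_len = 1, 3

# Additive causal mask: 0 where attention is allowed, dtype-min where it is not.
causal = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
causal.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
causal = causal[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)

# Padding mask for a single sequence whose last token is padding.
attention_mask = torch.tensor([[1, 1, 0]])
expanded = attention_mask[:, None, None, :].expand(bsz, 1, tgt_len, tgt_len).to(dtype)
inverted = 1.0 - expanded
padding = inverted.masked_fill(inverted.bool(), torch.finfo(dtype).min)

combined = causal + padding
# Row i lists which keys query i may attend to (0 = visible, very negative = hidden).
print(combined[0, 0])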
+ class YuanRMSNorm(nn.Module):
369
+ def __init__(self, hidden_size, eps=1e-6):
370
+ """
371
+ YuanRMSNorm is equivalent to LlamaRMSNorm
372
+ """
373
+ super().__init__()
374
+ self.weight = nn.Parameter(torch.ones(hidden_size))
375
+ self.variance_epsilon = eps
376
+
377
+ def forward(self, hidden_states):
378
+ input_dtype = hidden_states.dtype
379
+ hidden_states = hidden_states.to(torch.float32)
380
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
381
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
382
+ return self.weight * hidden_states.to(input_dtype)
383
+
384
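As its docstring says, YuanRMSNorm matches Llama's RMSNorm: the variance is computed in float32, each vector is scaled by the reciprocal root mean square, and a learned per-channel gain is applied before casting back to the input dtype. A quick numerical sketch:

import torch

hidden = torch.randn(2, 5, 8, dtype=torch.bfloat16)
weight = torch.ones(8)   # learned per-channel gain (unit here)
eps = 1e-6

x = hidden.to(torch.float32)
out = (weight * x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)).to(hidden.dtype)

# With a unit gain, every normalized vector has root mean square ~ 1.
print(out.to(torch.float32).pow(2).mean(-1).sqrt())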
+ # flash attn
385
+ class FlashSelfAttention(torch.nn.Module):
386
+ """Implement the scaled dot product attention with softmax.
387
+ Arguments
388
+ ---------
389
+ softmax_scale: The temperature to use for the softmax attention.
390
+ (default: 1/sqrt(d_keys) where d_keys is computed at
391
+ runtime)
392
+ attention_dropout: The dropout rate to apply to the attention
393
+ (default: 0.0)
394
+ """
395
+ def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
396
+ device=None, dtype=None):
397
+ super().__init__()
398
+ assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
399
+ 'e.g., with pip install flash-attn')
400
+ assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
401
+ self.causal = causal
402
+ self.softmax_scale = softmax_scale
403
+ self.dropout_p = attention_dropout
404
+
405
+ def forward(self, q, k, v):
406
+ """Implements the multihead softmax attention.
407
+ Arguments
408
+ ---------
409
+ q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
410
+ """
411
+
412
+ assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q,k,v)))
413
+ assert all((i.is_cuda for i in (q,k,v)))
414
+
415
+ batch_size, seqlen_q = q.shape[1], q.shape[0]
416
+ seqlen_k = k.shape[0]
417
+ q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
418
+ cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q.device)
419
+ if self.training:
420
+ # during training q,k,v always have same seqlen
421
+ assert seqlen_k == seqlen_q
422
+ is_causal = self.causal
423
+ cu_seqlens_k = cu_seqlens_q
424
+ dropout_p = self.dropout_p
425
+ else:
426
+ # turn off FA causal mask after first inference autoregressive iteration
427
+ # only on first autoregressive step q,k,v have same seqlen
428
+ is_causal = seqlen_q == seqlen_k
429
+ cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=q.device)
430
+ #cu_seqlens_q = [cu_seqlens_q[0], cu_seqlens_q[-1]]
431
+ #cu_seqlens_k = [cu_seqlens_k[0], cu_seqlens_k[-1]]
432
+ dropout_p = 0
433
+
434
+ output = flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, dropout_p, softmax_scale=self.softmax_scale, causal=is_causal)
435
+
436
+ output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
437
+ return output
438
+
439
+ class ParallelAttention_router(nn.Module):
440
+ def __init__(self, config):
441
+ super(ParallelAttention_router, self).__init__()
442
+ layer_number=0
443
+ self.layer_number = max(1, layer_number)
444
+
445
+ self.hidden_size = config.hidden_size
446
+ self.projection_size = config.moe_config['moe_num_experts']
447
+
448
+ self.num_attention_router_heads = config.moe_config['num_attention_router_heads']
449
+ self.hidden_size_per_attention_head = config.max_position_embeddings // self.num_attention_router_heads
450
+ self.query_key_value = nn.Linear(self.hidden_size, self.projection_size*3, bias=False)
451
+
452
+ def forward(self, hidden_states, attention_mask=None, enc_position_ids=None,
453
+ encoder_output=None, inference_params=None,
454
+ rotary_pos_emb=None):
455
+ is_first_step = False
456
+ before_hidden_states = None
457
+
458
+ #mixed_x_layer = torch.matmul(hidden_states, self.query_key_value)
459
+ mixed_x_layer = self.query_key_value(hidden_states)
460
+ (query_layer, key_layer, value_layer) = torch.split(mixed_x_layer, self.projection_size, -1)
461
+ b, s, z = query_layer.shape
462
+
463
+ # use fp32 router
464
+ query_layer = query_layer.float().view(b,s,z,1)
465
+ key_layer = key_layer.float().view(b,s,z,1)
466
+ value_layer = value_layer.float().view(b,s,z,1)
467
+
468
+ attn_weights = torch.matmul(query_layer, key_layer.transpose(2, 3))
469
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
470
+ attn_output = torch.matmul(attn_weights, value_layer)
471
+ router_output = attn_output.view(-1, z)
472
+ return router_output
473
+
474
+ class YuanExpertMLP(nn.Module):
475
+ def __init__(self, config):
476
+ super(YuanExpertMLP, self).__init__()
477
+ self.gated_linear_unit = config.moe_config['gated_linear_unit']
478
+ #self.ffn_hidden_size = config.moe_config['ffn_hidden_size']
479
+ self.ffn_hidden_size = config.ffn_hidden_size
480
+
481
+
482
+ if self.gated_linear_unit:
483
+ self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size*2, bias=False)
484
+
485
+ else:
486
+ self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size, bias=False)
487
+
488
+ self.act_fn = ACT2FN[config.hidden_act]
489
+ self.w2 = nn.Linear(self.ffn_hidden_size, config.hidden_size, bias=False)
490
+
491
+
492
+ def forward(self, x):
493
+ x = self.w1(x)
494
+ if self.gated_linear_unit:
495
+ x = torch.chunk(x, 2, dim=-1)
496
+ x = self.act_fn(x[0]) * x[1]
497
+ else:
498
+ x = self.act_fn(x)
499
+ x = self.w2(x)
500
+ return x
501
+
502
+
503
+
504
+ class YuanMLP(nn.Module):
505
+ def __init__(
506
+ self,
507
+ hidden_size: int,
508
+ intermediate_size: int,
509
+ hidden_act: str
510
+ ):
511
+ super().__init__()
512
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
513
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
514
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
515
+ self.act_fn = ACT2FN[hidden_act]
516
+
517
+ def forward(self, x):
518
+ return self.down_proj(self.gate_proj(x) * self.act_fn(self.up_proj(x)))
519
+
520
+
521
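YuanMLP and YuanExpertMLP are two spellings of the same gated-linear-unit idea: one keeps separate up/gate projections, the other fuses them into a single w1 and chunks the result. The sketch below checks that equivalence for one particular stacking order of the fused weight; the order used by actual Yuan checkpoints is an assumption here.

import torch

hidden, ffn = 8, 16
x = torch.randn(3, hidden)
act = torch.nn.SiLU()

# Separate projections (YuanMLP style)...
up = torch.nn.Linear(hidden, ffn, bias=False)
gate = torch.nn.Linear(hidden, ffn, bias=False)

# ...versus one fused projection chunked afterwards (YuanExpertMLP / GroupedMLP style),
# with the fused weight stacked as [activated half; gating half].
fused = torch.nn.Linear(hidden, ffn * 2, bias=False)
with torch.no_grad():
    fused.weight.copy_(torch.cat((up.weight, gate.weight), dim=0))

a = act(up(x)) * gate(x)
b0, b1 = torch.chunk(fused(x), 2, dim=-1)
b = act(b0) * b1
assert torch.allclose(a, b, atol=1e-5)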
+ class YuanAttention(nn.Module):
522
+ """Localized Filtering-based Attention 'YUAN 2.0: A Large Language Model with Localized Filtering-based Attention' paper"""
523
+
524
+ def __init__(self, config: YuanConfig):
525
+ super().__init__()
526
+ self.config = config
527
+ self.hidden_size = config.hidden_size
528
+ self.num_heads = config.num_attention_heads
529
+ self.lf_conv2d_group = config.lf_conv2d_group
530
+ self.lf_conv2d_num_pad = config.lf_conv2d_num_pad
531
+
532
+ try:
533
+ self.attention_projection_size = config.attention_projection_size
534
+ except AttributeError:
535
+ self.attention_projection_size = None
536
+
537
+ if self.attention_projection_size is None:
538
+ self.head_dim = self.hidden_size // self.num_heads
539
+ else:
540
+ self.head_dim = self.attention_projection_size // self.num_heads
541
+
542
+ self.max_position_embeddings = config.max_position_embeddings
543
+ self.causal_mask = config.causal_mask
544
+ self.attn_mask_type = config.attn_mask_type
545
+ self.softmax_scale = 1.0 / math.sqrt(self.head_dim)
546
+ self.use_flash_attention = config.use_flash_attention
547
+ try:
548
+ self.use_shareqk = config.use_shareqk
549
+ except Exception as e:
550
+ self.use_shareqk=False
551
+ self.dropout = 0.0
552
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
554
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
555
+
556
+ if self.use_shareqk:
557
+ self.qk_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
558
+ self.qk_weight = nn.Parameter(torch.Tensor(2, self.hidden_size))
559
+ self.qk_bias = nn.Parameter(torch.Tensor(2, self.hidden_size))
560
+ else:
561
+ self.lf_gate = LocalizedFiltering(self.hidden_size, self.lf_conv2d_group, self.lf_conv2d_num_pad)
562
+ self.get_query_key = nn.Linear(self.hidden_size, 2 * self.attention_projection_size, bias=False)
563
+ self.core_attention = FlashSelfAttention(causal=True, attention_dropout=config.attn_dropout, softmax_scale=self.softmax_scale)
564
+ #self.core_attention_flash = DotProductAttention(num_attention_heads=self.num_heads,
565
+ # kv_channels=self.head_dim)
566
+
567
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
568
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
569
+
570
+ def forward(
571
+ self,
572
+ hidden_states: torch.Tensor,
573
+ attention_mask: Optional[torch.Tensor] = None,
574
+ position_ids: Optional[torch.LongTensor] = None,
575
+ position_ids_k: Optional[torch.LongTensor] = None,
576
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
577
+ rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
578
+ output_attentions: bool = False,
579
+ use_cache: bool = False,
580
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
581
+
582
+ q_len, bsz, _ = hidden_states.size()
583
+ hidden_states = hidden_states#.to('cuda:1')
584
+ is_first_step = False
585
+ if use_cache:
587
+ if past_key_value is None:
588
+ before_hidden_states = None
589
+ is_first_step = True
590
+ if q_len > 1:
591
+ inference_hidden_states_memory = hidden_states[-2:, :, :]
592
+ else:
593
+ inference_hidden_states_memory = torch.cat((torch.zeros_like(hidden_states), hidden_states), dim=0)
594
+ else:
595
+ before_hidden_states = past_key_value[2]
596
+ inference_hidden_states_memory = torch.cat((before_hidden_states[-1:, :, :], hidden_states), dim=0)
597
+ value_states = self.v_proj(hidden_states).view(q_len, bsz, self.num_heads, self.head_dim)
598
+ if self.use_shareqk:
599
+ qk_states = self.qk_proj(hidden_states).view(q_len, bsz, self.num_heads*self.head_dim)
600
+ query_key = qk_states.unsqueeze(2) * self.qk_weight + self.qk_bias
601
+ query_states, key_states = torch.unbind(query_key, dim=2)
602
+
603
+ query_states = query_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
604
+ key_states = key_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
605
+ else:
606
+ hidden_states = self.lf_gate(hidden_states, before_hidden_states)
607
+ mixed_qk_layer = self.get_query_key(hidden_states)
608
+ #mixed_qk_layer = torch.matmul(hidden_states, qk_tensor)
609
+ new_tensor_shape = mixed_qk_layer.size()[:-1] + (self.num_heads, 2 * self.head_dim)
610
+ mixed_qk_layer = mixed_qk_layer.view(*new_tensor_shape)
611
+ (query_states, key_states) = torch.split(mixed_qk_layer, self.head_dim, dim=-1)
612
+
613
+
614
+ kv_seq_len = key_states.shape[1]
615
+ if past_key_value is not None:
616
+ kv_seq_len += past_key_value[0].shape[1]
617
+
618
+ # duplicate the pos_emb for self attention
619
+ if rotary_pos_emb is not None:
620
+ if position_ids.shape[1] == 1:
621
+ q_seq_start = position_ids[0,-1]
622
+ #seq_start = past_key_value[0].shape[0]
623
+ q_seq_end = q_seq_start + 1
624
+ k_seq_end = q_seq_end
625
+ else:
626
+ q_seq_start = 0
627
+ q_seq_end = q_seq_start+key_states.shape[0]
628
+ k_seq_end = q_seq_end
629
+
630
+ rotary_pos_shape = rotary_pos_emb.shape
631
+ if isinstance(rotary_pos_emb, tuple):
632
+ rotary_pos_emb = rotary_pos_emb
633
+ else:
634
+ rotary_pos_emb = ((rotary_pos_emb,) * 2)
635
+ q_pos_emb, k_pos_emb = rotary_pos_emb
636
+ #q_pos_emb = q_pos_emb[q_seq_start:q_seq_end]
637
+ #k_pos_emb = k_pos_emb[:k_seq_end]
638
+ #import pdb
639
+ #pdb.set_trace()
640
+ if past_key_value is not None:
641
+ # reuse k, v, self_attention
642
+ key_states = torch.cat([past_key_value[0], key_states], dim=0)
643
+ value_states = torch.cat([past_key_value[1], value_states], dim=0)
644
+ past_key_value = (key_states, value_states, inference_hidden_states_memory) if use_cache else None
645
+ #query_states = apply_rotary_pos_emb(query_states.permute(1, 0, 2, 3), q_pos_emb, position_ids)
646
+ #key_states = apply_rotary_pos_emb(key_states.permute(1, 0, 2, 3), k_pos_emb, position_ids)
647
+ query_states = apply_rotary_pos_emb(query_states, q_pos_emb, position_ids)
648
+ key_states = apply_rotary_pos_emb(key_states, k_pos_emb, position_ids_k)
649
+
650
+ attn_weights = None
651
+ #query_states = query_states.transpose(0,1)
652
+ #key_states = key_states.transpose(0,1)
653
+ #value_states = value_states
654
+ attn_output = self.core_attention(query_states, key_states, value_states)
655
+ #attn_output = self.core_attention(query_states, key_states, value_states, attention_mask)
656
+ q_len, bsz, _, _ = attn_output.shape
657
+ attn_output = attn_output.reshape(q_len, bsz, -1)
658
+
659
+ attn_output = self.o_proj(attn_output)
660
+
661
+ return attn_output, attn_weights, past_key_value
662
+
663
+ class MoEDroplessTokenDispatcher:
664
+ def __init__(self, num_experts: int, config: YuanConfig) -> None:
665
+ self.num_experts = num_experts
666
+ assert self.num_experts > 0, "Expected at least one expert"
667
+ self.router_topk = config.moe_config['moe_top_k']
668
+
669
+ def token_permutation(
670
+ self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor
671
+ ):
672
+ self.hidden_shape = hidden_states.shape
673
+ hidden_states = hidden_states.view(-1, self.hidden_shape[-1])
674
+
675
+ if self.router_topk > 1:
676
+ global_local_map = torch.ones_like(max_ind).bool()
677
+ local_indices = max_ind.masked_select(global_local_map)
678
+ local_probs = max_prob.masked_select(global_local_map)
679
+ global_local_map = global_local_map.nonzero()[:, 0]
680
+ global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1])
681
+ local_hidden_states = torch.gather(hidden_states, 0, global_local_map)
682
+
683
+ indices = torch.argsort(local_indices, dim=0)
684
+ tokens_per_expert = torch.histc(
685
+ local_indices,
686
+ bins=self.num_experts,
687
+ min=0,
688
+ max=self.num_experts - 1,
689
+ )
690
+ tokens_per_expert = tokens_per_expert.cpu().to(torch.long)
691
+
692
+ indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1])
693
+ permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices)
694
+ return (permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map)
695
+
696
+ def token_unpermutation(
697
+ self,
698
+ hidden_states: torch.Tensor,
699
+ scores: torch.Tensor,
700
+ indices: torch.Tensor,
701
+ global_local_map: torch.Tensor = None,
702
+ ):
703
+ scores = scores.to(dtype=hidden_states.dtype)
704
+ unpermuted_local_hidden = torch.zeros_like(hidden_states)
705
+ assert indices.shape == hidden_states.shape, f'{indices.shape}, {hidden_states.shape}'
706
+ unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states)
707
+
708
+ if self.router_topk > 1:
709
+ unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1)
710
+ unpermuted_local_bias = None
711
+ output_total = unpermuted_local_hidden
712
+ output_bias_total = unpermuted_local_bias
713
+
714
+ if self.router_topk > 1:
715
+ global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1]
716
+ global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]]
717
+ unpermuted_global_hidden = torch.zeros(
718
+ global_hidden_shape,
719
+ dtype=hidden_states.dtype,
720
+ device=hidden_states.device,
721
+ )
722
+ output_total = unpermuted_global_hidden.scatter_add(
723
+ 0, global_local_map, unpermuted_local_hidden
724
+ )
725
+
726
+ output_total = output_total.view(self.hidden_shape)
727
+
728
+ return output_total
729
+
730
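The dispatcher flattens every (token, expert) assignment, sorts tokens by expert id so each expert sees a contiguous slice, counts slice sizes with torch.histc, and later scatters the expert outputs back to the original token order weighted by the routing scores. A toy top-1 round trip (identity experts, hypothetical sizes) is shown below.

import torch

num_experts, num_tokens, dim = 3, 5, 4
hidden = torch.randn(num_tokens, dim)
expert_ids = torch.tensor([2, 0, 1, 0, 2])   # top-1 expert chosen for each token
scores = torch.ones(num_tokens)              # routing weights (all 1 for the demo)

# Permute: group tokens so each expert sees a contiguous slice.
order = torch.argsort(expert_ids)
tokens_per_expert = torch.histc(
    expert_ids.float(), bins=num_experts, min=0, max=num_experts - 1
).long()
permuted = hidden[order]
print(tokens_per_expert)  # tensor([2, 1, 2]) -> slice sizes for experts 0, 1, 2

# (each expert would transform its slice of `permuted` here; identity for the demo)
expert_out = permuted

# Unpermute: scatter rows back to their original positions and apply the scores.
index = order.view(-1, 1).expand(-1, dim)
restored = torch.zeros_like(expert_out).scatter(0, index, expert_out) * scores.view(-1, 1)
assert torch.allclose(restored, hidden)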
+ class GroupedMLP(nn.Module):
731
+ """An efficient implementation of the Experts layer using CUTLASS GroupedGEMM.
732
+
733
+ This class is designed to execute multiple experts in parallel, thereby maximizing computational efficiency.
734
+ """
735
+
736
+ def __init__(self, num_experts: int, config: YuanConfig):
737
+ super().__init__()
738
+ self.num_experts = num_experts
739
+ self.config = config
740
+
741
+ def glu(x):
742
+ x = torch.chunk(x, 2, dim=-1)
743
+ return torch.nn.functional.silu(x[0]) * x[1]
744
+
745
+ self.activation_func = glu
746
+ #self.ffn_hidden_size = config.moe_config['ffn_hidden_size']
747
+ self.ffn_hidden_size = config.ffn_hidden_size
748
+ fc1_output_size_per_partition = self.ffn_hidden_size * 2
749
+ fc2_input_size = self.ffn_hidden_size
750
+
751
+ self.w1 = nn.ModuleList([nn.Linear(self.config.hidden_size, self.ffn_hidden_size * 2, bias=False) for _ in range(num_experts)])
752
+ self.w2 = nn.ModuleList([nn.Linear(self.ffn_hidden_size, self.config.hidden_size, bias=False) for _ in range(num_experts)])
753
+ def forward(self, permuted_hidden_states, tokens_per_expert):
754
+ torch.cuda.set_device(permuted_hidden_states.device)
755
+ permuted_hidden_states = permuted_hidden_states#.to('cuda:0')
756
+ #fc1_output = gg.ops.gmm(permuted_hidden_states, self.weight1, tokens_per_expert.cpu(), trans_b=False)
757
+
758
+ #intermediate_parallel = self.activation_func(fc1_output)
759
+ #fc2_output = gg.ops.gmm(intermediate_parallel, self.weight2, tokens_per_expert.cpu(), trans_b=False)
760
+
761
+ fc2_outputs = []
762
+ start_idx = 0
763
+ for i in range(self.num_experts):
764
+ if tokens_per_expert[i] == 0:
765
+ continue
766
+ end_idx = start_idx + tokens_per_expert[i]
767
+ #fc1_output = torch.matmul(permuted_hidden_states[start_idx:end_idx], self.w1[i])
768
+ # Use custom attributes for each expert's Linear layers
769
+
770
+ fc1_output = self.w1[i](permuted_hidden_states[start_idx:end_idx])
771
+ #print("shape1:", self.w1[i].shape, "shape2:", permuted_hidden_states[start_idx:end_idx].shape)
772
+ intermediate_parallel = self.activation_func(fc1_output)
773
+ #fc2_output = torch.matmul(intermediate_parallel, self.w2[i])
774
+ fc2_output = self.w2[i](intermediate_parallel)
775
+ fc2_outputs.append(fc2_output)
776
+ start_idx = end_idx
777
+ fc2_output = torch.cat(fc2_outputs, dim=0)
778
+ return fc2_output#.to('cuda:1')
779
+
780
+ class YuanMoeLayer(nn.Module):
781
+ def __init__(self, config:YuanConfig):
782
+ super().__init__()
783
+ self.config = config
784
+ self.num_experts = config.moe_config['moe_num_experts']
785
+ self.top_k = config.moe_config['moe_top_k']
786
+ self.norm_topk_prob = config.moe_config['norm_topk_prob']
787
+ self.hidden_size = config.hidden_size
788
+
789
+ expert_indices_offset = (0)
790
+
791
+ #self.gate = ParallelAttention_router(config)
792
+ self.router = ParallelAttention_router(config)
793
+ self.token_dispatcher = MoEDroplessTokenDispatcher(self.num_experts, config=self.config)
794
+ self.experts = GroupedMLP(self.num_experts, self.config)
795
+
796
+ def routing(self, logits: torch.Tensor) -> torch.Tensor:
797
+ top_logits, indices = torch.topk(logits, k=self.top_k, dim=1)
798
+ scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits)
799
+ return scores, indices
800
+
801
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
802
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
803
+ #logits = self.gate(hidden_states)
804
+ logits = self.router(hidden_states)
805
+ scores, indices = self.routing(logits)
806
+ scores = scores.to(hidden_states.dtype)
807
+ (dispatched_input, tokens_per_expert, scores, indices, global_local_map, ) = self.token_dispatcher.token_permutation(hidden_states, scores, indices)
808
+ expert_output = self.experts(dispatched_input, tokens_per_expert)
809
+ output = self.token_dispatcher.token_unpermutation(expert_output, scores, indices, global_local_map)
810
+ return output
811
+
812
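The routing method keeps only the top-k logits per token and renormalizes them with a softmax, so each token's expert weights sum to one over its selected experts. For example:

import torch

logits = torch.tensor([[0.1, 2.0, 0.3, 1.5],
                       [1.0, 0.2, 0.9, 0.1]])   # [tokens, experts]
top_k = 2

top_logits, indices = torch.topk(logits, k=top_k, dim=1)
scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32)

print(indices)             # tensor([[1, 3], [0, 2]]) -- experts chosen per token
print(scores.sum(dim=-1))  # tensor([1., 1.])        -- weights renormalized over the top-k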
+ class YuanDecoderLayer(nn.Module):
813
+ def __init__(self, config: YuanConfig, num_layer):
814
+ super().__init__()
815
+ self.hidden_size = config.hidden_size
816
+ self.self_attn = YuanAttention(config=config)
817
+ self.num_layer = num_layer
818
+
819
+ if config.moe_config['moe_num_experts'] > 0:
820
+ self.mlp = YuanMoeLayer(config)
821
+ else:
822
+ self.mlp = YuanMLP(
823
+ hidden_size=self.hidden_size,
824
+ intermediate_size=config.intermediate_size,
825
+ hidden_act=config.hidden_act,
826
+ )
827
+
828
+
829
+ self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
830
+ self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
831
+
832
+ def forward(
833
+ self,
834
+ hidden_states: torch.Tensor,
835
+ attention_mask: Optional[torch.Tensor] = None,
836
+ position_ids: Optional[torch.LongTensor] = None,
837
+ position_ids_k: Optional[torch.LongTensor] = None,
838
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
839
+ rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
840
+ output_attentions: Optional[bool] = False,
841
+ use_cache: Optional[bool] = False,
842
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
843
+ """
844
+ Args:
845
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
846
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
847
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
848
+ output_attentions (`bool`, *optional*):
849
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
850
+ returned tensors for more detail.
851
+ use_cache (`bool`, *optional*):
852
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
853
+ (see `past_key_values`).
854
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
855
+ """
856
+ residual = hidden_states#.to('cuda:1')
858
+ torch.cuda.set_device(hidden_states.device)
859
+ hidden_states = self.input_layernorm(hidden_states) #.to('cuda:0')).to('cuda:1')
860
+
861
+ # Self Attention
862
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
863
+ hidden_states=hidden_states,
864
+ attention_mask=attention_mask,
865
+ position_ids=position_ids,
866
+ position_ids_k=position_ids_k,
867
+ past_key_value=past_key_value,
868
+ rotary_pos_emb=rotary_pos_emb,
869
+ output_attentions=output_attentions,
870
+ use_cache=use_cache,
871
+ )
872
+
873
+ #print(hidden_states)
875
+ #pdb.set_trace()
876
+ hidden_states = residual + hidden_states.permute(1, 0, 2)
877
+
878
+ # Fully Connected
879
+ residual = hidden_states#.to('cuda:1')
880
+ torch.cuda.set_device(hidden_states.device)
881
+ hidden_states = self.post_attention_layernorm(hidden_states) #.to('cuda:0')).to('cuda:1')
882
+ hidden_states = self.mlp(hidden_states)# .to('cuda:1')
883
+ hidden_states = residual + hidden_states
884
+ outputs = (hidden_states,)
885
+
886
+ if output_attentions:
887
+ outputs += (self_attn_weights,)
888
+
889
+ if use_cache:
890
+ outputs += (present_key_value,)
891
+
892
+ return outputs
893
+
894
+
895
+ YUAN_START_DOCSTRING = r"""
896
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
897
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
898
+ etc.)
899
+
900
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
901
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
902
+ and behavior.
903
+
904
+ Parameters:
905
+ config ([`YuanConfig`]):
906
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
907
+ load the weights associated with the model, only the configuration. Check out the
908
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
909
+ """
910
+
911
+
912
+ @add_start_docstrings(
913
+ "The bare Yuan Model outputting raw hidden-states without any specific head on top.",
914
+ YUAN_START_DOCSTRING,
915
+ )
916
+ class YuanPreTrainedModel(PreTrainedModel):
917
+ config_class = YuanConfig
918
+ base_model_prefix = "model"
919
+ supports_gradient_checkpointing = True
920
+ _no_split_modules = ["YuanDecoderLayer"]
921
+ _skip_keys_device_placement = "past_key_values"
922
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
923
+
924
+ def _init_weights(self, module):
925
+ std = self.config.initializer_range
926
+ if isinstance(module, nn.Linear):
927
+ module.weight.data.normal_(mean=0.0, std=std)
928
+ if module.bias is not None:
929
+ module.bias.data.zero_()
930
+ elif isinstance(module, nn.Embedding):
931
+ module.weight.data.normal_(mean=0.0, std=std)
932
+ if module.padding_idx is not None:
933
+ module.weight.data[module.padding_idx].zero_()
934
+
935
+ def _set_gradient_checkpointing(self, module, value=False):
936
+ if isinstance(module, YuanModel):
937
+ module.gradient_checkpointing = value
938
+
939
+
940
+ YUAN_INPUTS_DOCSTRING = r"""
941
+ Args:
942
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
943
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
944
+ it.
945
+
946
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
947
+ [`PreTrainedTokenizer.__call__`] for details.
948
+
949
+ [What are input IDs?](../glossary#input-ids)
950
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
951
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
952
+
953
+ - 1 for tokens that are **not masked**,
954
+ - 0 for tokens that are **masked**.
955
+
956
+ [What are attention masks?](../glossary#attention-mask)
957
+
958
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
959
+ [`PreTrainedTokenizer.__call__`] for details.
960
+
961
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
962
+ `past_key_values`).
963
+
964
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
965
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
966
+ information on the default strategy.
967
+
968
+ - 1 indicates the head is **not masked**,
969
+ - 0 indicates the head is **masked**.
970
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
971
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
972
+ config.n_positions - 1]`.
973
+
974
+ [What are position IDs?](../glossary#position-ids)
975
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
976
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
977
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
978
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
979
+
980
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
981
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
982
+
983
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
984
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
985
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
986
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
987
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
988
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
989
+ model's internal embedding lookup matrix.
990
+ use_cache (`bool`, *optional*):
991
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
992
+ `past_key_values`).
993
+ output_attentions (`bool`, *optional*):
994
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
995
+ tensors for more detail.
996
+ output_hidden_states (`bool`, *optional*):
997
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
998
+ more detail.
999
+ return_dict (`bool`, *optional*):
1000
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1001
+ """
1002
+
1003
+
1004
+ @add_start_docstrings(
1005
+ "The bare Yuan Model outputting raw hidden-states without any specific head on top.",
1006
+ YUAN_START_DOCSTRING,
1007
+ )
1008
+ class YuanModel(YuanPreTrainedModel):
1009
+ """
1010
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`YuanDecoderLayer`]
1011
+
1012
+ Args:
1013
+ config: YuanConfig
1014
+ """
1015
+
1016
+ def __init__(self, config: YuanConfig):
1017
+ super().__init__(config)
1018
+ self.padding_idx = config.pad_token_id
1019
+ self.vocab_size = config.vocab_size
1020
+
1021
+ #TODO: control it by config
1022
+ self.eod_token = config.eod_token
1023
+ self.reset_attention_mask = config.reset_attention_mask
1024
+ self.reset_position_ids = config.reset_position_ids
1025
+ self.max_position_embeddings = config.max_position_embeddings
1026
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1027
+ self.layers = nn.ModuleList([YuanDecoderLayer(config, i) for i in range(config.num_hidden_layers)])
1028
+ self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1029
+ self.gradient_checkpointing = False
1030
+ # Initialize weights and apply final processing
1031
+ self.post_init()
1032
+
1033
+ self.seq_length = config.max_position_embeddings
1034
+ rotary_dim = config.hidden_size // config.num_attention_heads
1035
+ if config.rotary_percent < 1.0:
1036
+ rotary_dim = int(rotary_dim * config.rotary_percent)
1037
+ self.rotary_pos_emb = YuanRotaryEmbedding(rotary_dim, base=config.rotary_base, dtype=config.torch_dtype)
1038
+
1039
+
1040
+ def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
1041
+ return self.embed_tokens(input_ids)
1042
+
1043
+ def set_input_embeddings(self, value):
1044
+ self.embed_tokens = value
1045
+
1046
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
1047
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
1048
+ # create causal mask
1049
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1050
+ combined_attention_mask = None
1051
+ if input_shape[-1] > 1:
1052
+ combined_attention_mask = _make_causal_mask(
1053
+ input_shape,
1054
+ inputs_embeds.dtype,
1055
+ device=inputs_embeds.device,
1056
+ past_key_values_length=past_key_values_length,
1057
+ )
1058
+
1059
+ if attention_mask is not None:
1060
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1061
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
1062
+ inputs_embeds.device
1063
+ )
1064
+ combined_attention_mask = (
1065
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
1066
+ )
1067
+
1068
+ return combined_attention_mask
1069
+
1070
+ def _prepare_decoder_attention_mask_training(self, input_id, inputs_embeds, eod_token, reset_mask_flag, reset_attention_mask=True, reset_position_ids=True):
1071
+
1072
+ micro_batch_size, seq_length = input_id.size()
1073
+
1074
+ attention_mask = torch.tril(torch.ones(
1075
+ (micro_batch_size, seq_length, seq_length), device=inputs_embeds.device)).view(
1076
+ micro_batch_size, 1, seq_length, seq_length)
1077
+
1078
+ position_ids = torch.arange(seq_length, dtype=torch.long,
1079
+ device=inputs_embeds.device)
1080
+ position_ids = position_ids.unsqueeze(0).expand_as(input_id)
1081
+
1082
+ if reset_position_ids:
1083
+ position_ids = position_ids.clone()
1084
+
1085
+ if reset_position_ids or reset_attention_mask:
1086
+ # Loop through the batches:
1087
+ for b in range(micro_batch_size):
1088
+
1089
+ # Find indices where the EOD token is.
1090
+ eod_index = position_ids[b, input_id[b] == eod_token]
1091
+
1092
+ # Detach indices from positions if going to modify positions.
1093
+ if reset_position_ids:
1094
+ eod_index = eod_index.clone()
1095
+ # Loop through EOD indices:
1096
+ prev_index = 0
1097
+ for j in range(eod_index.size()[0]):
1098
+ i = eod_index[j]
1099
+ # Mask attention loss.
1100
+ if reset_attention_mask:
1101
+ attention_mask[b, 0, (i + 1):, :(i + 1)] = 0
1102
+ # Reset positions.
1103
+ if reset_position_ids:
1104
+ position_ids[b, (i + 1):] -= (i + 1 - prev_index)
1105
+ prev_index = i + 1
1106
+
1107
+ inverted_mask = 1 - attention_mask
1108
+ output_attn_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min)
1109
+ if reset_mask_flag:
1110
+ output_attn_mask = output_attn_mask[:,:,-1:,:]
1111
+ return output_attn_mask, position_ids
1112
+
1113
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1114
+ def forward(
1115
+ self,
1116
+ input_ids: torch.LongTensor = None,
1117
+ attention_mask: Optional[torch.Tensor] = None,
1118
+ position_ids: Optional[torch.LongTensor] = None,
1119
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1120
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1121
+ use_cache: Optional[bool] = None,
1122
+ output_attentions: Optional[bool] = None,
1123
+ output_hidden_states: Optional[bool] = None,
1124
+ output_router_logits: Optional[bool] = None,
1125
+ return_dict: Optional[bool] = None,
1126
+ ) -> Union[Tuple, BaseModelOutputWithPast, torch.Tensor]:
1127
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1128
+ output_router_logits = (
1129
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1130
+ )
1131
+ output_hidden_states = (
1132
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1133
+ )
1134
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1135
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1136
+ input_ids1 = copy.deepcopy(input_ids)
1137
+ reset_mask_flag = False
1138
+ if past_key_values:
1139
+ input_ids = input_ids
1140
+ input_ids = input_ids[:,-1:]
1141
+ if use_cache:
1142
+ reset_mask_flag = True
1143
+ # retrieve input_ids and inputs_embeds
1144
+ if input_ids is not None and inputs_embeds is not None:
1145
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1146
+ elif input_ids is not None:
1147
+ input_ids = input_ids
1148
+ batch_size, seq_length = input_ids.shape
1149
+ elif inputs_embeds is not None:
1150
+ inputs_embeds = inputs_embeds.transpose(0,1)
1151
+ batch_size, seq_length, _ = inputs_embeds.shape
1152
+ else:
1153
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1154
+
1155
+ seq_length_with_past = seq_length
1156
+ past_key_values_length = 0
1157
+
1158
+ if past_key_values is not None:
1159
+ #past_key_values_length = past_key_values[0][0].shape[2]
1160
+ #modify
1161
+ past_key_values_length = past_key_values[0][0].shape[0]
1162
+ seq_length_with_past = seq_length_with_past + past_key_values_length
1163
+
1164
+ # modify to reset position ids
1165
+ if past_key_values is not None:
1166
+ pos_start = position_ids[:,-1]+1
1167
+ pos_end = pos_start+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
1168
+ position_ids_k = torch.arange(pos_start.item(), pos_end.item()).to(position_ids.device)
1169
+ position_ids_k = position_ids_k.unsqueeze(0)
1170
+ position_ids_k = torch.cat((position_ids, position_ids_k), dim=1)
1171
+ position_ids = position_ids[:,-1]+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
1172
+ position_ids = position_ids.unsqueeze(0)
1173
+ #print(position_ids_k,position_ids)
1174
+ #print(position_ids_k.shape,position_ids.shape)
1175
+ else:
1176
+ position_ids_k = position_ids
1177
+ #print(position_ids)
1178
+ #import pdb
1179
+ #pdb.set_trace()
1180
+
1181
+ if position_ids is None:
1182
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1183
+ position_ids = torch.arange(
1184
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1185
+ )
1186
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1187
+ else:
1188
+ #position_ids = position_ids.view(-1, seq_length).long()
1189
+ pass
1190
+
1191
+ if inputs_embeds is None:
1192
+ inputs_embeds = self.embed_tokens(input_ids).transpose(0,1)
1193
+
1194
+ if self.training or self.reset_position_ids:
1195
+ attention_mask, _ = self._prepare_decoder_attention_mask_training(input_ids1, inputs_embeds, self.eod_token, reset_mask_flag, self.reset_attention_mask, self.reset_position_ids)
1196
+ else:
1197
+ if attention_mask is None:
1198
+ attention_mask = torch.ones(
1199
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
1200
+ )
1201
+ attention_mask = self._prepare_decoder_attention_mask(
1202
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1203
+ )
1204
+
1205
+ #rotary_pos_emb = self.rotary_pos_emb(self.max_position_embeddings)
1206
+ # Rotary positional embeddings (embedding is None for PP intermediate devices)
1207
+ rotary_pos_emb = None
1208
+ '''
1209
+ rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
1210
+ transformer_input=inputs_embeds
1211
+ )
1212
+ '''
1213
+ rotary_pos_emb = self.rotary_pos_emb(self.max_position_embeddings)
1214
+
1215
+ hidden_states = inputs_embeds
1216
+ if self.gradient_checkpointing and self.training:
1217
+ if use_cache:
1218
+ logger.warning_once(
1219
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1220
+ )
1221
+ use_cache = False
1222
+
1223
+ # decoder layers
1224
+ all_hidden_states = () if output_hidden_states else None
1225
+ all_self_attns = () if output_attentions else None
1226
+ next_decoder_cache = () if use_cache else None
1227
+ position_ids = position_ids.cpu()
1228
+ position_ids_k = position_ids_k.cpu()
1229
+ for idx, decoder_layer in enumerate(self.layers):
1230
+ if output_hidden_states:
1231
+ all_hidden_states += (hidden_states,)
1232
+
1233
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1234
+
1235
+ if self.gradient_checkpointing and self.training:
1236
+ def create_custom_forward(module):
1237
+ def custom_forward(*inputs):
1238
+ # None for past_key_value
1239
+ return module(*inputs, output_attentions, None)
1240
+
1241
+ return custom_forward
1242
+
1243
+ layer_outputs = torch.utils.checkpoint.checkpoint(
1244
+ create_custom_forward(decoder_layer),
1245
+ hidden_states,
1246
+ attention_mask,
1247
+ position_ids,
1248
+ None,
1249
+ )
1250
+ else:
1251
+ layer_outputs = decoder_layer(
1252
+ hidden_states,
1253
+ attention_mask=attention_mask,
1254
+ position_ids=position_ids,
1255
+ position_ids_k=position_ids_k,
1256
+ past_key_value=past_key_value,
1257
+ rotary_pos_emb=rotary_pos_emb,
1258
+ output_attentions=output_attentions,
1259
+ use_cache=use_cache,
1260
+ )
1261
+ hidden_states = layer_outputs[0]
1262
+
1263
+ if use_cache:
1264
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
1265
+
1266
+ if output_attentions:
1267
+ all_self_attns += (layer_outputs[1],)
1268
+ hidden_states = hidden_states#.to('cuda:0')
1269
+ torch.cuda.set_device(hidden_states.device)
1270
+ hidden_states = self.norm(hidden_states)
1271
+ # add hidden states from the last decoder layer
1272
+ if output_hidden_states:
1273
+ all_hidden_states += (hidden_states,)
1274
+ next_cache = next_decoder_cache if use_cache else None
1275
+ if not return_dict:
1276
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1277
+ return BaseModelOutputWithPast(
1278
+ last_hidden_state=hidden_states,
1279
+ past_key_values=next_cache,
1280
+ hidden_states=all_hidden_states,
1281
+ attentions=all_self_attns,
1282
+ )
1283
+
1284
+
1285
+ class YuanForCausalLM(YuanPreTrainedModel):
1286
+ def __init__(self, config):
1287
+ super().__init__(config)
1288
+ '''
1289
+ self.eod_token = config.eod_token
1290
+ self.sep_token = config.sep_token
1291
+ self.use_loss_mask = config.use_loss_mask
1292
+ self.model = YuanModel(config)
1293
+
1294
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1295
+
1296
+ # Initialize weights and apply final processing
1297
+ self.post_init()
1298
+ '''
1299
+ # these attributes are referenced by get_loss_mask and the use_loss_mask branch of forward
+ self.eod_token = config.eod_token
+ self.sep_token = config.sep_token
+ self.use_loss_mask = config.use_loss_mask
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1300
+ self.model = YuanModel(config)
1301
+ #self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1302
+ self.post_init()
1303
+
1304
+ def get_input_embeddings(self):
1305
+ return self.model.embed_tokens
1306
+
1307
+ def set_input_embeddings(self, value):
1308
+ self.model.embed_tokens = value
1309
+
1310
+ def get_output_embeddings(self):
1311
+ return self.lm_head
1312
+
1313
+ def set_output_embeddings(self, new_embeddings):
1314
+ self.lm_head = new_embeddings
1315
+
1316
+ def set_decoder(self, decoder):
1317
+ self.model = decoder
1318
+
1319
+ def get_decoder(self):
1320
+ return self.model
1321
+
1322
+ def get_loss_mask(self, input_ids, labels, eod_token, sep_token):
1323
+ micro_batch_size, seq_length = input_ids.size()
1324
+ loss_mask = torch.ones(input_ids.size(), dtype=torch.float, device=input_ids.device)
1325
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
1326
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
1327
+
1328
+
1329
+ """modify loss_mask to only calculate the loss of the answer (separated with [SEP])"""
1330
+
1331
+ for b in range(micro_batch_size):
1332
+ eod_indexs = position_ids[b, input_ids[b] == eod_token]
1333
+ sep_indexs = position_ids[b, input_ids[b] == sep_token]
1334
+
1335
+ if len(eod_indexs) == 0 or len(sep_indexs) == 0:
1336
+ loss_mask[b] = 1.0
1337
+ else:
1338
+ if eod_indexs[0] > sep_indexs[0]:
1339
+ loss_mask[b, 0:sep_indexs[0]] = 0
1340
+
1341
+ if len(eod_indexs) == len(sep_indexs):
1342
+ for ii, eod_index in enumerate(eod_indexs):
1343
+ start_index = eod_index
1344
+ if ii == (len(sep_indexs) - 1):
1345
+ stop_index = seq_length
1346
+ else:
1347
+ stop_index = sep_indexs[ii + 1]
1348
+ loss_mask[b, start_index:stop_index] = 0.0
1349
+ else:
1350
+ if len(eod_indexs) > len(sep_indexs):
1351
+ loss_mask[b,:] = 1.0
1352
+ else:
1353
+ for ii, eod_index in enumerate(eod_indexs):
1354
+ start_index = eod_index
1355
+ stop_index = sep_indexs[ii + 1]
1356
+
1357
+ loss_mask[b, start_index:stop_index] = 0.0
1358
+
1359
+ elif eod_indexs[0] < sep_indexs[0]:
1360
+
1361
+ if len(eod_indexs) == len(sep_indexs):
1362
+ for ii, eod_index in enumerate(eod_indexs):
1363
+ start_index = eod_index
1364
+ stop_index = sep_indexs[ii]
1365
+ loss_mask[b, start_index:stop_index] = 0.0
1366
+
1367
+ else:
1368
+ if len(eod_indexs) < len(sep_indexs):
1369
+ loss_mask[b,:] = 1.0
1370
+ else:
1371
+ for ii, eod_index in enumerate(eod_indexs):
1372
+ start_index = eod_index
1373
+ if ii >= len(sep_indexs):
1374
+ stop_index = seq_length
1375
+ else:
1376
+ stop_index = sep_indexs[ii]
1377
+ loss_mask[b, start_index:stop_index] = 0.0
1378
+
1379
+ loss_mask[input_ids == eod_token] = 1.0
1380
+ return loss_mask
1381
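Once get_loss_mask has zeroed the prompt spans, the use_loss_mask branch of forward computes a per-token cross entropy, multiplies it by the mask, and divides by the number of unmasked tokens. A minimal sketch with a hypothetical mask is below; it shifts the mask together with the labels so the shapes line up, which is an illustrative choice rather than the exact indexing used above.

import torch
from torch.nn import CrossEntropyLoss

vocab, seq = 10, 6
logits = torch.randn(1, seq, vocab)
labels = torch.randint(0, vocab, (1, seq))
loss_mask = torch.tensor([[0., 0., 0., 1., 1., 1.]])  # 0 = prompt token, 1 = answer token

# Shift so that tokens < n predict n; the mask is shifted with the labels here so
# that the per-token losses and the mask have the same length.
shift_logits = logits[..., :-1, :].reshape(-1, vocab)
shift_labels = labels[..., 1:].reshape(-1)
shift_mask = loss_mask[..., 1:].reshape(-1)

per_token = CrossEntropyLoss(reduction="none")(shift_logits, shift_labels)
loss = torch.sum(per_token * shift_mask) / shift_mask.sum()
print(loss)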
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1382
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1383
+ def forward(
1384
+ self,
1385
+ input_ids: torch.LongTensor = None,
1386
+ attention_mask: Optional[torch.Tensor] = None,
1387
+ position_ids: Optional[torch.LongTensor] = None,
1388
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1389
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1390
+ labels: Optional[torch.LongTensor] = None,
1391
+ use_cache: Optional[bool] = None,
1392
+ output_attentions: Optional[bool] = None,
1393
+ output_hidden_states: Optional[bool] = None,
1394
+ return_dict: Optional[bool] = None,
1395
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1396
+ """
1397
+ ## modify delete routers
1398
+ Args:
1399
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1400
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1401
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1402
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1403
+
1404
+ Returns:
1405
+
1406
+ Example:
1407
+
1408
+ ```python
1409
+ >>> from transformers import AutoTokenizer, YuanForCausalLM
1410
+
1411
+ >>> model = YuanForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1412
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1413
+
1414
+ >>> prompt = "Hey, are you consciours? Can you talk to me?"
1415
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1416
+
1417
+ >>> # Generate
1418
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1419
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1420
+ "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
1421
+ ```"""
1422
+
1423
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1424
+
1425
+ output_hidden_states = (
1426
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1427
+ )
1428
+
1429
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1430
+
1431
+ outputs = self.model(
1432
+ input_ids=input_ids,
1433
+ attention_mask=attention_mask,
1434
+ position_ids=position_ids,
1435
+ past_key_values=past_key_values,
1436
+ inputs_embeds=inputs_embeds,
1437
+ use_cache=use_cache,
1438
+ output_attentions=output_attentions,
1439
+ output_hidden_states=output_hidden_states,
1440
+ return_dict=return_dict,
1441
+ )
1442
+
1443
+ hidden_states = outputs[0].transpose(0,1)
1444
+
1445
+ logits = self.lm_head(hidden_states)
1446
+
1447
+ loss = None
1448
+ if labels is not None:
1449
+ if self.use_loss_mask:
1450
+ loss_mask = self.get_loss_mask(input_ids, labels, self.eod_token, self.sep_token)
1451
+ # Shift so that tokens < n predict n
1452
+ shift_logits = logits[..., :-1, :].contiguous()
1453
+ shift_labels = labels[..., 1:].contiguous()
1454
+ # Flatten the tokens
1455
+ if self.use_loss_mask:
1456
+ loss_fct = CrossEntropyLoss(reduction='none')
1457
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1458
+ shift_labels = shift_labels.view(-1)
1459
+ # Enable model parallelism
1460
+ shift_labels = shift_labels.to(shift_logits.device)
1461
+ loss = loss_fct(shift_logits, shift_labels)
1462
+ loss = torch.sum(loss * loss_mask) / loss_mask.sum()
1463
+ else:
1464
+ loss_fct = CrossEntropyLoss()
1465
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1466
+ shift_labels = shift_labels.view(-1)
1467
+ # Enable model parallelism
1468
+ shift_labels = shift_labels.to(shift_logits.device)
1469
+ loss = loss_fct(shift_logits, shift_labels)
1470
+ if not return_dict:
1471
+ output = (logits,) + outputs[1:]
1472
+ return (loss,) + output if loss is not None else output
1473
+
1474
+ return CausalLMOutputWithPast(
1475
+ loss=loss,
1476
+ logits=logits,
1477
+ past_key_values=outputs.past_key_values,
1478
+ hidden_states=hidden_states,
1479
+ attentions=outputs.attentions,
1480
+ )
1481
+
1482
+ def prepare_inputs_for_generation(
1483
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1484
+ ):
1485
+
1486
+ position_ids = kwargs.get("position_ids", None)
1487
+ if attention_mask is not None and position_ids is None:
1488
+ # create position_ids on the fly for batch generation
1489
+ position_ids = attention_mask.long().cumsum(-1) - 1
1490
+ position_ids.masked_fill_(attention_mask == 0, 1)
1491
+ if past_key_values:
1492
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1493
+
1494
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1495
+ if inputs_embeds is not None and past_key_values is None:
1496
+ model_inputs = {"inputs_embeds": inputs_embeds}
1497
+ else:
1498
+ model_inputs = {"input_ids": input_ids}
1499
+
1500
+ model_inputs.update(
1501
+ {
1502
+ "position_ids": position_ids,
1503
+ "past_key_values": past_key_values,
1504
+ "use_cache": kwargs.get("use_cache"),
1505
+ "attention_mask": attention_mask,
1506
+ }
1507
+ )
1508
+ return model_inputs
1509
+
1510
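The position_ids construction in prepare_inputs_for_generation is what makes left-padded batched generation work: positions count only the real tokens, padded slots get a dummy value, and once a KV cache exists only the newest position is forwarded. For instance:

import torch

# Left-padded batch: 0 marks padding, 1 marks real tokens.
attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])

position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])

# Once a KV cache is populated, only the newest token's position is passed on.
print(position_ids[:, -1].unsqueeze(-1))  # tensor([[2], [4]])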
+ @staticmethod
1511
+ def _reorder_cache(past_key_values, beam_idx):
1512
+ reordered_past = ()
1513
+ for layer_past in past_key_values:
1514
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1515
+ return reordered_past
1516
+
1517
+
1518
+ @add_start_docstrings(
1519
+ """
1520
+ The Yuan Model transformer with a sequence classification head on top (linear layer).
1521
+
1522
+ [`YuanForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1523
+ (e.g. GPT-2) do.
1524
+
1525
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1526
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1527
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1528
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1529
+ each row of the batch).
1530
+ """,
1531
+ YUAN_START_DOCSTRING,
1532
+ )
1533
+ class YuanForSequenceClassification(YuanPreTrainedModel):
1534
+ #_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
1535
+
1536
+ def __init__(self, config):
1537
+ super().__init__(config)
1538
+ self.num_labels = config.num_labels
1539
+ self.model = YuanModel(config)
1540
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1541
+
1542
+ # Initialize weights and apply final processing
1543
+ self.post_init()
1544
+
1545
+ def get_input_embeddings(self):
1546
+ return self.model.embed_tokens
1547
+
1548
+ def set_input_embeddings(self, value):
1549
+ self.model.embed_tokens = value
1550
+
1551
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1552
+ def forward(
1553
+ self,
1554
+ input_ids: torch.LongTensor = None,
1555
+ attention_mask: Optional[torch.Tensor] = None,
1556
+ position_ids: Optional[torch.LongTensor] = None,
1557
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1558
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1559
+ labels: Optional[torch.LongTensor] = None,
1560
+ use_cache: Optional[bool] = None,
1561
+ output_attentions: Optional[bool] = None,
1562
+ output_hidden_states: Optional[bool] = None,
1563
+ return_dict: Optional[bool] = None,
1564
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1565
+ r"""
1566
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1567
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1568
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1569
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1570
+ """
1571
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1572
+ transformer_outputs = self.model(
1573
+ input_ids,
1574
+ attention_mask=attention_mask,
1575
+ position_ids=position_ids,
1576
+ past_key_values=past_key_values,
1577
+ inputs_embeds=inputs_embeds,
1578
+ use_cache=use_cache,
1579
+ output_attentions=output_attentions,
1580
+ output_hidden_states=output_hidden_states,
1581
+ return_dict=return_dict,
1582
+ )
1583
+ hidden_states = transformer_outputs[0]
1584
+ logits = self.score(hidden_states)
1585
+
1586
+ if input_ids is not None:
1587
+ batch_size = input_ids.shape[0]
1588
+ else:
1589
+ batch_size = inputs_embeds.shape[0]
1590
+
1591
+ if self.config.pad_token_id is None and batch_size != 1:
1592
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1593
+ if self.config.pad_token_id is None:
1594
+ sequence_lengths = -1
1595
+ else:
1596
+ if input_ids is not None:
1597
+ sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
1598
+ else:
1599
+ sequence_lengths = -1
1600
+
1601
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
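+ # e.g. with a pad_token_id configured and input_ids [[5, 6, 7, pad, pad]],
+ # sequence_lengths is 2, so the logits of the last non-pad token are pooled.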
1602
+
1603
+ loss = None
1604
+ if labels is not None:
1605
+ labels = labels.to(logits.device)
1606
+ if self.config.problem_type is None:
1607
+ if self.num_labels == 1:
1608
+ self.config.problem_type = "regression"
1609
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1610
+ self.config.problem_type = "single_label_classification"
1611
+ else:
1612
+ self.config.problem_type = "multi_label_classification"
1613
+
1614
+ if self.config.problem_type == "regression":
1615
+ loss_fct = MSELoss()
1616
+ if self.num_labels == 1:
1617
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1618
+ else:
1619
+ loss = loss_fct(pooled_logits, labels)
1620
+ elif self.config.problem_type == "single_label_classification":
1621
+ loss_fct = CrossEntropyLoss()
1622
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1623
+ elif self.config.problem_type == "multi_label_classification":
1624
+ loss_fct = BCEWithLogitsLoss()
1625
+ loss = loss_fct(pooled_logits, labels)
1626
+ if not return_dict:
1627
+ output = (pooled_logits,) + transformer_outputs[1:]
1628
+ return ((loss,) + output) if loss is not None else output
1629
+
1630
+ return SequenceClassifierOutputWithPast(
1631
+ loss=loss,
1632
+ logits=pooled_logits,
1633
+ past_key_values=transformer_outputs.past_key_values,
1634
+ hidden_states=transformer_outputs.hidden_states,
1635
+ attentions=transformer_outputs.attentions,
1636
+ )
1637
+
1638
+
1639
+
modeling_yuanvl_chat.py ADDED
@@ -0,0 +1,410 @@
1
+ # --------------------------------------------------------
2
+ # YuanVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import warnings
8
+ from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional,
9
+ Set, Tuple, Type, TypedDict, Union)
10
+
11
+ import torch.utils.checkpoint
12
+ import transformers
13
+ import torch
14
+ from torch import nn
15
+ from torch.nn import CrossEntropyLoss
16
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
17
+ LlamaTokenizer)
18
+ from transformers.modeling_outputs import CausalLMOutputWithPast
19
+ from transformers.modeling_utils import PreTrainedModel
20
+ from transformers.utils import ModelOutput, logging
21
+
22
+ from transformer_engine.pytorch import RMSNorm
23
+ from transformers.activations import ACT2FN
24
+
25
+ from .configuration_yuanvl import YuanVLChatConfig
26
+ from .conversation import get_conv_template
27
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
28
+ from .modeling_yuanlm2 import YuanForCausalLM
29
+ from .utils import flatten_bn, merge_multimodal_embeddings
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ class InternVLImagePixelInputs(TypedDict):
34
+ type: Literal["pixel_values"]
35
+ data: Union[torch.Tensor, List[torch.Tensor]]
36
+ """
37
+ Shape: `(batch_size, 1 + num_patches, num_channels, height, width)`
38
+
39
+ Note that `num_patches` may be different for each batch, in which case
40
+ the data is passed as a list instead of a batched tensor.
41
+ """
42
+ patches_per_image: List[int]
43
+ """
44
+ List of number of total patches for each image in the batch.
45
+ """
46
+
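+ # Illustrative example, assuming the repo's 448px vision input: two images
+ # split into 5 and 3 tiles (thumbnail included) would arrive as
+ # data = [Tensor(5, 3, 448, 448), Tensor(3, 3, 448, 448)],
+ # patches_per_image = [5, 3].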
47
+
48
+ class InternVLImageEmbeddingInputs(TypedDict):
49
+ type: Literal["image_embeds"]
50
+ data: Any # in vllm vision this is a NestedTensors
51
+ """
52
+ A tensor of shape `(num_images, total_image_feature_size, hidden_size)`
53
+ or a list of tensors of shape `(total_image_feature_size, hidden_size)`
54
+
55
+ `hidden_size` must match the hidden size of language model backbone.
56
+ """
57
+
58
+
59
+ InternVLImageInputs = Union[InternVLImagePixelInputs,
60
+ InternVLImageEmbeddingInputs]
61
+
62
+
63
+ def version_cmp(v1, v2, op='eq'):
64
+ import operator
65
+
66
+ from packaging import version
67
+ op_func = getattr(operator, op)
68
+ return op_func(version.parse(v1), version.parse(v2))
69
+
70
+ class YuanImageMLP(nn.Module):
71
+
72
+ def __init__(
73
+ self,
74
+ hidden_size: int,
75
+ intermediate_size: int,
76
+ output_size: int,
77
+ hidden_act: str,
78
+ ) -> None:
79
+ super().__init__()
80
+ #self.up_proj = ColumnParallelLinear(hidden_size, intermediate_size, bias=False,)
81
+ #self.gate_proj = ColumnParallelLinear(hidden_size, intermediate_size, bias=False,)
82
+ #self.down_proj = RowParallelLinear(intermediate_size, output_size, bias=False,)
83
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
84
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
85
+ self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
86
+
87
+ if hidden_act != "silu":
88
+ raise ValueError(f"Unsupported activation: {hidden_act}. Only silu is supported for now.")
89
+
90
+ self.act_fn = ACT2FN[hidden_act]
91
+
92
+ @torch.compile
93
+ def swiglu(self, y_1, y_2):
94
+ return self.act_fn(y_1) * y_2
95
+
96
+ def forward(self, x):
97
+ #import pdb
98
+ x1 = self.up_proj(x)
99
+ x2 = self.gate_proj(x)
100
+ x3 = self.swiglu(x1, x2)
101
+ #x3 = self.act_fn(x1)
102
+ #x2 = self.gate_proj(x)
103
+ x = self.down_proj(x3)
104
+ return x
105
+
106
+ class YuanVLChatModel(PreTrainedModel):
107
+ config_class = YuanVLChatConfig
108
+ main_input_name = 'pixel_values'
109
+ base_model_prefix = 'language_model'
110
+ _supports_flash_attn_2 = True
111
+ _no_split_modules = ['InternVisionModel', 'YuanDeocderLayer']
112
+
113
+ def __init__(self, config: YuanVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
114
+ super().__init__(config)
115
+
116
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
117
+ image_size = config.force_image_size or config.vision_config.image_size
118
+ patch_size = config.vision_config.patch_size
119
+ self.patch_size = patch_size
120
+ self.select_layer = config.select_layer
121
+ self.template = config.template
122
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
123
+ self.downsample_ratio = config.downsample_ratio
124
+ self.ps_version = config.ps_version
125
+ use_flash_attn = use_flash_attn if has_flash_attn else False
126
+ config.vision_config.use_flash_attn = True if use_flash_attn else False
127
+ config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
128
+
129
+ logger.info(f'num_image_token: {self.num_image_token}')
130
+ logger.info(f'ps_version: {self.ps_version}')
131
+ if vision_model is not None:
132
+ self.vision_model = vision_model
133
+ else:
134
+ self.vision_model = InternVisionModel(config.vision_config)
135
+ if language_model is not None:
136
+ self.language_model = language_model
137
+ else:
138
+ if config.llm_config.architectures[0] == 'YuanForCausalLM':
139
+ self.language_model = YuanForCausalLM(config.llm_config)
140
+ else:
141
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
142
+
143
+ self.pixel_unshuffle = torch.nn.PixelUnshuffle(downscale_factor=2)
144
+ #vit_hidden_size = config.vision_config.hidden_size
145
+ #llm_hidden_size = config.llm_config.hidden_size
146
+ #vit_mlp_ffn_hidden_size = config.vit_mlp_ffn_hidden_size
147
+ #layernorm_epsilon = config.llm_config.layernorm_epsilon
148
+ layernorm_epsilon = config.llm_config.rms_norm_eps
149
+
150
+ self.imagemlp_input_hiddensize = int(config.vision_config.hidden_size / self.downsample_ratio ** 2)
151
+ self.imagemlp_ffn_hidden_size = config.llm_config.ffn_hidden_size
152
+
153
+ self.imagemlp = YuanImageMLP(self.imagemlp_input_hiddensize, self.imagemlp_ffn_hidden_size,
154
+ output_size=config.llm_config.hidden_size, hidden_act="silu")
155
+ self.imagemlp_layernorm = RMSNorm(config.llm_config.hidden_size, eps=layernorm_epsilon)
156
+
157
+ '''
158
+ # modify internvl vision
159
+ vit_hidden_size = config.vision_config.hidden_size
160
+ llm_hidden_size = config.llm_config.hidden_size
161
+ self.mlp1 = nn.Sequential(
162
+ nn.LayerNorm(vit_hidden_size * int(1/self.downsample_ratio) ** 2),
163
+ nn.Linear(vit_hidden_size * int(1/self.downsample_ratio) ** 2, llm_hidden_size),
164
+ nn.GELU(),
165
+ nn.Linear(llm_hidden_size, llm_hidden_size)
166
+ )
167
+ '''
168
+
169
+ self.img_context_token_id = config.img_context_token_id
170
+ self.conv_template = get_conv_template(self.template)
171
+ self.system_message = self.conv_template.system_message
172
+
173
+ def _validate_pixel_values(self,
174
+ data: Union[torch.Tensor, List[torch.Tensor]]
175
+ ) -> Union[torch.Tensor, List[torch.Tensor]]:
176
+
177
+ h = w = self.config.vision_config.image_size
178
+ expected_dims = (3, h, w)
179
+
180
+ def _validate_shape(d: torch.Tensor):
181
+ actual_dims = tuple(d.shape)
182
+ if actual_dims != expected_dims:
183
+ # expected_expr = ("num_patches", *map(str, expected_dims))
184
+ expected_expr = (expected_dims)
185
+ raise ValueError("The expected shape of pixel values in each batch element "
186
+ f" is {expected_expr}. You supplied {tuple(d.shape)}.")
187
+ # data may be a single tensor or a List[tensor]
188
+ # judging from this block, the number of image tensors is imbs * num_images
189
+ for d in data:
190
+ _validate_shape(d)
191
+ return data
192
+
193
+
194
+
195
+ def _parse_and_validate_image_input(self,
196
+ pixel_values: List[torch.Tensor] = None,
197
+ image_token_id: torch.Tensor = None,
198
+ image_embeds: torch.Tensor = None,
199
+ ) -> Optional[InternVLImagePixelInputs]:
200
+ # no image data
201
+ if pixel_values is None and image_embeds is None:
202
+ return None
203
+
204
+ # image_embeds were passed in
205
+ if image_embeds is not None:
206
+ if not isinstance(image_embeds, torch.Tensor):
207
+ raise ValueError("Incorrect type of image embeddings. "
208
+ f"Got type: {type(image_embeds)}")
209
+ return InternVLImageEmbeddingInputs(
210
+ type="image_embeds",
211
+ data=flatten_bn(image_embeds),
212
+ )
213
+
214
+ #self.img_context_token_id = image_token_id[0]
215
+ if pixel_values is not None:
216
+ if not isinstance(pixel_values, (torch.Tensor, list)):
217
+ raise ValueError("Incorrect type of pixel values. "
218
+ f"Got type: {type(pixel_values)}")
219
+ patches_per_image = []
220
+ # loop over the batch / requests
221
+ for request_pixel_values in pixel_values:
222
+ # loop over the images of each request
223
+ patches_per_image.append(request_pixel_values.shape[0])
224
+
225
+ # We need to flatten (B, N, P) to (B*N*P)
226
+ # so we call flatten_bn twice.
227
+ # (total_patches, 3, h, w)
228
+ return InternVLImagePixelInputs(
229
+ type="pixel_values",
230
+ data=self._validate_pixel_values(flatten_bn(pixel_values)),
231
+ patches_per_image=patches_per_image)
232
+ raise AssertionError("This line should be unreachable")
233
+
234
+ def _process_image_input(
235
+ self,
236
+ image_input: InternVLImageInputs,
237
+ ) -> Tuple[torch.Tensor] :
238
+ if image_input["type"] == "image_embeds":
239
+ return image_input["data"]
240
+ assert self.vision_model is not None
241
+ # (total_patches, tokens_per_image, llm_config.hidden_size)
242
+ image_embeds = self.extract_feature(image_input["data"])
243
+
244
+ patches_per_image = image_input["patches_per_image"]
245
+
246
+ # Only one image in the current batch
247
+ # for bsz = 1, return image_embeds directly
248
+ if len(patches_per_image) == 1:
249
+ # return a tensor of shape [1, num_patches*256, text_config.hidden_size]
250
+ image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size).unsqueeze(1)
251
+ return image_embeds
252
+ # NOTE: Image embeddings are split into separate tensors for each image
253
+ # by the size of each embedding.
254
+ # feature_size: 256 token positions per patch (tile)
255
+ feature_size = image_embeds.shape[1]
256
+ # (total_image_tokens, llm_config.hidden_size)
257
+ image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size)
258
+ image_feature_sizes = [num_patches * feature_size for num_patches in patches_per_image]
259
+ # after splitting we get a tuple; each element is the image_embed of one image, [num_patches * 256, llm_config.hidden_size]
260
+ image_embeds = image_embeds.split(image_feature_sizes)
261
+
262
+ return image_embeds
263
+
264
+
265
+
266
+ def get_multimodal_embeddings(self,
267
+ pixel_values: Optional[List[torch.Tensor]] = None,
268
+ image_token_id: Optional[List[torch.Tensor]] = None,
269
+ image_embeds: Optional[List[torch.Tensor]] = None,
270
+ image_input: InternVLImageInputs = None,
271
+ ):
272
+ image_input = self._parse_and_validate_image_input(pixel_values, image_token_id, image_embeds)
273
+ if image_input is None:
274
+ return None
275
+
276
+ # image_input: (total_patches, 3, h, w)
277
+ vision_embeddings = self._process_image_input(image_input)
278
+ return vision_embeddings
279
+
280
+ def get_input_embeddings(
281
+ self,
282
+ input_ids: torch.Tensor,
283
+ multimodal_embeddings: Optional[torch.Tensor]
284
+ ) -> torch.Tensor:
285
+ # build the token embeddings
286
+ inputs_embeds = self.language_model.model.get_input_embeddings(input_ids)
287
+ # place the image embeds at the img_context_token_id positions
288
+ if multimodal_embeddings is not None:
289
+ assert self.img_context_token_id is not None
290
+ # input_ids: torch.Tensor
291
+ # inputs_embeds: torch.Tensor
292
+ # multimodal_embeddings: torch.Tensor
293
+ # placeholder_token_id: img_context_token_id
294
+ inputs_embeds = merge_multimodal_embeddings(
295
+ input_ids, inputs_embeds, multimodal_embeddings,
296
+ self.img_context_token_id)
297
+ return inputs_embeds
298
+
299
+ def forward(
300
+ self,
301
+ input_ids: torch.LongTensor = None,
302
+ attention_mask: torch.Tensor = None,
303
+ position_ids: torch.LongTensor = None,
304
+ past_key_values: List[torch.FloatTensor] = None,
305
+ inputs_embeds: Optional[torch.FloatTensor] = None,
306
+ labels: Optional[torch.LongTensor] = None,
307
+ use_cache: Optional[bool] = None,
308
+ output_attentions: Optional[bool] = None,
309
+ output_hidden_states: Optional[bool] = None,
310
+ return_dict: Optional[bool] = None,
311
+ pixel_values: Optional[List[torch.Tensor]] = None,
312
+ image_token_id: Optional[List[torch.Tensor]] = None,
313
+ image_embeds: Optional[List[torch.Tensor]] = None,
314
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
315
+
316
+ #import pdb
317
+ #pdb.set_trace()
318
+ if inputs_embeds is None:
319
+ # (images, patches * token_per_image)
320
+ vision_embeddings = self.get_multimodal_embeddings(pixel_values, image_token_id, image_embeds)
321
+ # (tokens, hidden_size)
322
+ inputs_embeds = self.get_input_embeddings(input_ids, vision_embeddings).permute(1, 0, 2)
323
+ input_ids = None
324
+
325
+ hidden_states = self.language_model.model(input_ids, attention_mask, position_ids, past_key_values,
326
+ inputs_embeds, labels, use_cache, output_attentions,
327
+ output_hidden_states, return_dict)
328
+
329
+ return hidden_states
330
+
331
+ def pixel_shuffle(self, x, scale_factor=0.5):
332
+ n, w, h, c = x.size()
333
+ # N, W, H, C --> N, W, H * scale, C // scale
334
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
335
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
336
+ x = x.permute(0, 2, 1, 3).contiguous()
337
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
338
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
339
+ int(c / (scale_factor * scale_factor)))
340
+ if self.ps_version == 'v1':
341
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
342
+ 'which results in a transposed image.')
343
+ else:
344
+ x = x.permute(0, 2, 1, 3).contiguous()
345
+ return x
346
+
347
+ # Internvl vision
348
+ def extract_feature(self, pixel_values):
349
+ # pixel_values: (imbs * num_image, ic, ih, iw)
350
+ pixel_values = pixel_values.to(torch.bfloat16)
351
+ output = self.vision_model(pixel_values=pixel_values)
352
+ vit_embeds=output[0]
353
+ # vit_embeds: (imbs * num_images, h*w, vit_dim)
354
+ vit_embeds = vit_embeds[:, 1:, :]
355
+
356
+ '''h = w = int(vit_embeds.shape[1]**0.5)
357
+ # vit_embeds: (imbs * num_images, vit_dim, h, w)
358
+ vit_embeds = vit_embeds.view(vit_embeds.shape[0], h, w, -1)
359
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
360
+ '''
361
+ pn, phw, pc = vit_embeds.shape
362
+ ph = pw = int(phw**0.5)
363
+ vit_embeds = vit_embeds.view(pn, ph, pw, pc).permute(0, 3, 1, 2)
364
+ vit_embeds = self.pixel_unshuffle(vit_embeds)
365
+ pn, pc, ph, pw = vit_embeds.shape
366
+ vit_embeds = vit_embeds.view(pn, pc, ph * pw).permute(0, 2, 1)
367
+ num_images, cvs, chs = vit_embeds.shape
368
+ #_, cvs, chs = vit_embeds.shape
369
+ #assert self.imagemlp_ffn_hidden_size == chs
370
+ #vit_embeds = vit_embeds.contiguous().view(imbs, num_image * cvs, chs).permute(1, 0, 2).contiguous()
371
+ vit_embeds = vit_embeds.reshape(1, -1, vit_embeds.shape[-1]).permute(1, 0, 2)
372
+ vit_embeds = self.imagemlp(vit_embeds)
373
+ vit_embeds = self.imagemlp_layernorm(vit_embeds)
374
+ vit_embeds = vit_embeds.view(num_images, cvs, -1)
375
+ return vit_embeds
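+ # Rough shape walk-through, assuming 448x448 inputs and 14px ViT patches:
+ # dropping the class token leaves 32x32 = 1024 tokens per tile; PixelUnshuffle(2)
+ # folds them to 16x16 = 256 tokens with 4x the channels, and imagemlp plus
+ # imagemlp_layernorm project each token to the LLM hidden size, matching
+ # num_image_token = (448 // 14)**2 * downsample_ratio**2 = 256.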
376
+
377
+ @torch.no_grad()
378
+ def generate(
379
+ self,
380
+ pixel_values: Optional[torch.FloatTensor] = None,
381
+ input_ids: Optional[torch.FloatTensor] = None,
382
+ attention_mask: Optional[torch.LongTensor] = None,
383
+ visual_features: Optional[torch.FloatTensor] = None,
384
+ generation_config: Optional[GenerationConfig] = None,
385
+ position_ids: Optional[torch.Tensor] = None,
386
+ output_hidden_states: Optional[bool] = None,
387
+ ) -> torch.LongTensor:
388
+
389
+
390
+ if pixel_values is not None:
391
+ if visual_features is not None:
392
+ vit_embeds = visual_features
393
+ else:
394
+ vit_embeds = self.get_multimodal_embeddings(pixel_values)
395
+ inputs_embeds = self.get_input_embeddings(input_ids, vit_embeds)
396
+ input_ids = None
397
+
398
+
399
+ outputs = self.language_model.generate(
400
+ inputs_embeds=inputs_embeds,
401
+ attention_mask=attention_mask,
402
+ generation_config=generation_config,
403
+ output_hidden_states=output_hidden_states,
404
+ position_ids=position_ids,
405
+ max_length=8192,
406
+ use_cache=True,
407
+ )
408
+
409
+
410
+ return outputs
mq_test_demo.py ADDED
@@ -0,0 +1,576 @@
1
+ import torch
2
+ from transformers import AutoModel, AutoTokenizer
3
+ from PIL import Image
4
+ from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
5
+ CenterCrop, ColorJitter, Grayscale
6
+ import math
7
+
8
+ FILE_EXTENSIONS = ('.jpeg', '.txt', '.idx')
9
+ '''
10
+ args = {
11
+ "patch_size": 16,
12
+ "patch_num_width": 16,
13
+ "patch_num_height": 16,
14
+ "position_embedding_length": 4096,
15
+ "clip_model_name": 'InternViT-448',
16
+ "image_segment_method": 'dynamic',
17
+ "max_split_tile_num_multi_image": 1,
18
+ "clip_visual_size": 1024,
19
+ "clip_hidden_size": 1024,
20
+ "downsample_ratio": 0.5
21
+ }
22
+ '''
23
+ class args:
24
+ patch_size = 16
25
+ patch_num_width = 16
26
+ patch_num_height = 16
27
+ position_embedding_length = 4096
28
+ clip_model_name = 'InternViT-448'
29
+ image_segment_method = 'dynamic' ##'adaptive'
30
+ max_split_tile_num_multi_image = 1
31
+ max_split_tile_num_single_image = 9
32
+ clip_visual_size = 1024
33
+ clip_hidden_size = 1024
34
+ downsample_ratio = 0.5
35
+ shape_change_threshold = 0.5
36
+ bf16 = True
37
+ fp16 = False
38
+
39
+
40
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size, threshold):
41
+ best_ratio_diff = float('inf')
42
+ best_ratio = (1, 1)
43
+ area = width * height
44
+ for ratio in target_ratios:
45
+ target_aspect_ratio = ratio[0] / ratio[1]
46
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
47
+ size_diff_length = abs(((ratio[0]*image_size + ratio[1]*image_size)-(width+height)) / (width+height))
48
+ if ratio_diff < best_ratio_diff and size_diff_length <= threshold:
49
+ best_ratio_diff = ratio_diff
50
+ best_ratio = ratio
51
+ elif ratio_diff == best_ratio_diff:
52
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
53
+ best_ratio = ratio
54
+ return best_ratio
55
+
56
+ def build_transform(input_size):
57
+ #MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
58
+ transform = Compose([
59
+ Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
60
+ _convert_to_rgb,
61
+ ToTensor(),
62
+ Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
63
+ ])
64
+ return transform
65
+
66
+ def torch_extract_patches(image_tensor, patch_height, patch_width):
67
+ PATCH_SIZE = args.patch_size
68
+ PATCH_NUM_WIDTH = args.patch_num_width
69
+ PATCH_NUM_HEIGHT = args.patch_num_height
70
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
71
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
72
+ # 576
73
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
74
+ #
75
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
76
+ # 336 336
77
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
78
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
79
+ image_tensor = image_tensor.unsqueeze(0)
80
+ patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
81
+ patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
82
+ patches = patches.permute(0, 4, 2, 3, 1).reshape(
83
+ image_tensor.size(2) // patch_height,
84
+ image_tensor.size(3) // patch_width,
85
+ image_tensor.size(1) * patch_height * patch_width,
86
+ )
87
+ return patches.unsqueeze(0)
88
+
89
+ # compute the input image size required for adaptive processing
90
+ def adapt_size(originHeight:int,originWeight:int):
91
+ ### compute the image size for adaptive resizing
92
+ # Arguments:
93
+ # originHeight: height of the original image
94
+ # originWidth: width of the original image
95
+ # patchHeight: patch height
96
+ # patchWidth: patch width
97
+ # maxPatches: upper bound on the number of patches
98
+ # Returns:
99
+ # resized_height: image height after interpolation
100
+ # resized_width: image width after interpolation
101
+ # resized_patch_height_num: number of vertical patches after interpolation
102
+ # resized_patch_width_num: number of horizontal patches after interpolation
103
+ PATCH_SIZE = args.patch_size
104
+ PATCH_NUM_WIDTH = args.patch_num_width
105
+ PATCH_NUM_HEIGHT = args.patch_num_height
106
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
107
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
108
+ # 576
109
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
110
+ #
111
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
112
+ # 336 336
113
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
114
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
115
+ patchHeight = PATCH_SIZE
116
+ patchWidth = PATCH_SIZE
117
+ maxPatches = MAX_PATCHES
118
+ scale = math.sqrt(maxPatches * (patchHeight / originHeight) * (patchWidth / originWeight))
119
+ resized_patch_height_num = max(min(math.floor(scale * originHeight / patchHeight), maxPatches), 1)
120
+ resized_patch_width_num = max(min(math.floor(scale * originWeight / patchWidth), maxPatches), 1)
121
+ resized_height = max(resized_patch_height_num * PATCH_SIZE, 1)
122
+ resized_width = max(resized_patch_width_num * PATCH_SIZE, 1)
123
+ return resized_height, resized_width, resized_patch_height_num, resized_patch_width_num
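+ # Worked example with the demo defaults (patch 16, max 16*16 = 256 patches):
+ # an 800x600 image gives scale = sqrt(256 * (16/600) * (16/800)) ~ 0.37, i.e.
+ # 18 x 13 patches, so it is resized to 288 x 208 (width x height).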
124
+
125
+ def cal_num_of_slices(origin_image_width, origin_image_height, max_num):
126
+ #import pdb
127
+ #pdb.set_trace()
128
+ PATCH_SIZE = args.patch_size
129
+ PATCH_NUM_WIDTH = args.patch_num_width
130
+ PATCH_NUM_HEIGHT = args.patch_num_height
131
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
132
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
133
+ # 576
134
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
135
+ #
136
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
137
+ # 336 336
138
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
139
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
140
+ scale = origin_image_width*origin_image_height/(IMAGE_WIDTH*IMAGE_HEIGHT)
141
+
142
+ scale = math.ceil(scale)
143
+ max_num_img=max_num
144
+ if scale > max_num_img:
145
+ scale = max_num_img
146
+ def factorize(n):
147
+ factors = []
148
+ for i in range(1, n + 1):
149
+ if n % i == 0:
150
+ factors.append((i/(n/i), i, n // i))
151
+ return factors
152
+ numbers = [1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15]
153
+ factor_dict = {}
154
+ for num in numbers:
155
+ factor_dict[num] = factorize(num)
156
+ log_origin_ratio = math.log(origin_image_width/origin_image_height)
157
+ available_ratios = []
158
+ if scale<=2:
159
+ available_ratios = factor_dict[scale] + factor_dict[scale + 1]
160
+ else :
161
+ available_ratios = factor_dict[scale-1] + factor_dict[scale]+factor_dict[scale+1]
162
+
163
+ min_dif = 1000
164
+ best_w = 0
165
+ best_h = 0
166
+ for (r,w_slice,h_slice) in available_ratios:
167
+ log_r = math.log(r)
168
+ if min_dif > abs(log_r - log_origin_ratio):
169
+ min_dif = abs(log_r - log_origin_ratio)
170
+ best_w = w_slice
171
+ best_h = h_slice
172
+ return best_w,best_h
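+ # Rough example with the demo defaults (256x256 base area) and max_num = 9:
+ # a 1920x1080 image asks for ceil(1920*1080 / 65536) = 32 tiles, capped to 9;
+ # among the candidate grids for 8, 9 and 10 tiles, the 4x2 grid (ratio 2.0)
+ # is closest in log space to the image ratio ~1.78, so best_w, best_h = 4, 2.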
173
+ # slice the image into tiles
174
+ def get_patch_nums(origin_image_width, origin_image_height, max_num):
175
+ # input: the size of the original image
176
+ # returns:
177
+ # slice_w_num: number of patches along w in each slice
178
+ # slice_h_num: number of patches along h in each slice
179
+ # abstract_w_num: number of patches along w in the overview (original) image
180
+ # abstract_h_num: number of patches along h in the overview (original) image
181
+ PATCH_SIZE = args.patch_size
182
+ PATCH_NUM_WIDTH = args.patch_num_width
183
+ PATCH_NUM_HEIGHT = args.patch_num_height
184
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
185
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
186
+ # 576
187
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
188
+ #
189
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
190
+ # 336 336
191
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
192
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
193
+
194
+ best_w, best_h = cal_num_of_slices(origin_image_width,origin_image_height, max_num)
195
+ slice_width = origin_image_width//best_w
196
+ slice_height = origin_image_height//best_h
197
+ _,_,slice_h_num,slice_w_num = adapt_size(slice_height,slice_width)
198
+ _,_,abstract_h_num,abstract_w_num = adapt_size(origin_image_height,origin_image_width)
199
+ #print(slice_w_num,slice_h_num,abstract_w_num,abstract_h_num)
200
+ return slice_w_num,slice_h_num,abstract_w_num,abstract_h_num
201
+
202
+ def slice_image(image, max_num):
203
+
204
+ # slice the image according to our principle
205
+ # return an array of slices
206
+ PATCH_SIZE = args.patch_size
207
+ PATCH_NUM_WIDTH = args.patch_num_width
208
+ PATCH_NUM_HEIGHT = args.patch_num_height
209
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
210
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
211
+ # 576
212
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
213
+ #
214
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
215
+ # 336 336
216
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
217
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
218
+
219
+ origin_image_width = image.size[0]
220
+ origin_image_height = image.size[1]
221
+
222
+ best_w, best_h = cal_num_of_slices(origin_image_width=origin_image_width, origin_image_height=origin_image_height, max_num=max_num )
223
+ slices = []
224
+ # print(best_w,best_h)
225
+
226
+ for j in range(best_h):
227
+ for i in range(best_w):
228
+
229
+ box = (i * origin_image_width//best_w, j * origin_image_height//best_h, (i + 1) * origin_image_width//best_w, (j + 1) * origin_image_height//best_h)
230
+ # crop this tile from the image
231
+ region = image.crop(box).convert("RGB")
232
+ # append it to the list
233
+ slices.append(region)
234
+
235
+ return slices
236
+ def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False, threshold=1):
237
+ orig_width, orig_height = image.size
238
+ aspect_ratio = orig_width / orig_height
239
+
240
+ # calculate the existing image aspect ratio
241
+ target_ratios = set(
242
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
243
+ i * j <= max_num and i * j >= min_num)
244
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
245
+ # find the closest aspect ratio to the target
246
+ target_aspect_ratio = find_closest_aspect_ratio(
247
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size, threshold)
248
+ # calculate the target width and height
249
+ target_width = image_size * target_aspect_ratio[0]
250
+ target_height = image_size * target_aspect_ratio[1]
251
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
252
+
253
+ # resize the image
254
+ resized_img = image.resize((target_width, target_height))
255
+ processed_images = []
256
+ for i in range(blocks):
257
+ box = (
258
+ (i % (target_width // image_size)) * image_size,
259
+ (i // (target_width // image_size)) * image_size,
260
+ ((i % (target_width // image_size)) + 1) * image_size,
261
+ ((i // (target_width // image_size)) + 1) * image_size
262
+ )
263
+ print(box)
264
+ # split the image
265
+ split_img = resized_img.crop(box)
266
+ processed_images.append(split_img)
267
+ assert len(processed_images) == blocks
268
+ if use_thumbnail and len(processed_images) != 1:
269
+ thumbnail_img = image.resize((image_size, image_size))
270
+ processed_images.append(thumbnail_img)
271
+ return processed_images
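+ # Sketch with image_size = 448 and use_thumbnail = True: an 896x448 image
+ # matches target ratio (2, 1), is resized to 896x448, cropped into two
+ # 448x448 tiles, and a 448x448 thumbnail is appended, giving 3 crops.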
272
+
273
+ def process_image(image, image_size, max_num):
274
+ PATCH_SIZE = args.patch_size
275
+ PATCH_NUM_WIDTH = args.patch_num_width
276
+ PATCH_NUM_HEIGHT = args.patch_num_height
277
+ POSITION_EMBEDDING_LENGTH = args.position_embedding_length
278
+ print(PATCH_SIZE,PATCH_NUM_WIDTH,PATCH_NUM_HEIGHT,POSITION_EMBEDDING_LENGTH)
279
+ # 576
280
+ MAX_PATCHES = PATCH_NUM_WIDTH * PATCH_NUM_HEIGHT
281
+ #
282
+ TOKEN_LENGTH = 3 * PATCH_SIZE * PATCH_SIZE
283
+ # 336 336
284
+ IMAGE_WIDTH = PATCH_SIZE * PATCH_NUM_WIDTH
285
+ IMAGE_HEIGHT = PATCH_SIZE * PATCH_NUM_HEIGHT
286
+
287
+ origin_image_width = image.size[0]
288
+ origin_image_height = image.size[1]
289
+ image = image.convert("RGB")
290
+ slices = slice_image(image, max_num)
291
+ if len(slices) != 1:
292
+ thumbnail_img = image.resize((image_size, image_size))
293
+ slices.append(thumbnail_img)
294
+ # compute the image size after resizing
295
+ resized_height, resized_width, resized_patch_height, resized_patch_width = \
296
+ adapt_size(origin_image_height,origin_image_width)
297
+ image = slices[0]
298
+ image_w = image.size[0]
299
+ image_h = image.size[1]
300
+ resized_height, resized_width, resized_patch_height, resized_patch_width = \
301
+ adapt_size(image_h,image_w)
302
+ image = ToTensor()(image)
303
+
304
+ image = torch.nn.functional.interpolate(
305
+ image.unsqueeze(0),
306
+ size=(resized_height, resized_width),
307
+ mode="bilinear",
308
+ align_corners=False,
309
+ antialias=True,
310
+ ).squeeze(0)
311
+ # number of patches that need to be masked (padded)
312
+ num_patches_to_pad = MAX_PATCHES - resized_patch_height*resized_patch_width
313
+ # print("mask: ", num_patches_to_pad)
314
+ # split the resized image into patches
315
+ image = torch_extract_patches(image,PATCH_SIZE, PATCH_SIZE)
316
+ image = image.reshape([resized_patch_width*resized_patch_height,TOKEN_LENGTH])
317
+ # pad the masked part of the image with zeros
318
+ image = torch.nn.functional.pad(image, [0, 0, 0, num_patches_to_pad]).float() #torch.Size([196, 768])
319
+ image = image.reshape(PATCH_NUM_WIDTH, PATCH_NUM_HEIGHT, PATCH_SIZE, PATCH_SIZE, 3).permute(0, 2, 1, 3, 4).reshape(IMAGE_WIDTH, IMAGE_HEIGHT, 3).permute(2, 0 ,1)
320
+ #print(image.shape)
321
+ #image = torch.stack(image)
322
+ return slices
323
+
324
+ def _convert_to_rgb(image):
325
+ return image.convert('RGB')
326
+
327
+ def load_image(image_file, input_size=448, max_num=9):
328
+ image = Image.open(image_file).convert('RGB')
329
+ # image.save('seg_imge/'+image_file.split('/')[-1])
330
+ # print(max_num)
331
+ if args.clip_model_name == 'InternViT-448':
332
+ transform = build_transform(input_size=input_size)
333
+ #image_processor = CLIPImageProcessor.from_pretrained(args.clip_download_path)
334
+ #'/mnt/beegfs1/shenqiang/internvit-448/models--InternViT-300M-448px/'args.clip_download_path
335
+ if args.image_segment_method == 'adaptive':
336
+ images_processed = process_image(image, input_size, max_num)
337
+ elif args.image_segment_method == 'dynamic':
338
+ images_processed = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num, threshold=args.shape_change_threshold)
339
+ # pixel_values = [image_processor(images=image, return_tensors='pt').pixel_values.squeeze(0) for image in images_processed]
340
+ pixel_values = [transform(image) for image in images_processed]
341
+ else:
342
+ transform = build_transform(input_size=input_size)
343
+ if args.image_segment_method == 'adaptive':
344
+ images_processed = process_image(image, input_size, max_num)
345
+ elif args.image_segment_method == 'dynamic':
346
+ images_processed = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
347
+ pixel_values = [transform(image) for image in images_processed]
348
+
349
+ pixel_values = torch.stack(pixel_values)
350
+
351
+ return pixel_values
352
+
353
+ def preprocess_input(args, num_token_per_tile, image_path, question):
354
+ image_prompts = ''
355
+ if len(image_path) >= 2:
356
+ image_list = []
357
+ num_tile_per_image_list = []
358
+ for ipath in image_path:
359
+ images = load_image(ipath, max_num=args.max_split_tile_num_multi_image)
360
+ #images = load_image(ipath, max_num=args.max_split_tile_num_multi_image).view(1, -1, 3, 448, 448).cuda()
361
+ num_tile_this_image = len(images)
362
+ num_tile_per_image_list.append(num_tile_this_image)
363
+ image_list.append(images)
364
+ image_prompts = image_prompts + '<IMAGE>' + '<pad>' * num_tile_this_image * num_token_per_tile + '</IMAGE>'
365
+ num_tile_per_image_tensor = torch.Tensor(num_tile_per_image_list).long().cuda()
366
+ image_tensor = torch.cat(image_list, dim=0).view(1, -1, 3, 448, 448).cuda()
367
+
368
+ else:
369
+ #images_tensor = load_image(image_path, max_num=args.max_split_tile_num_single_image).view(1, -1, 3, 448, 448).cuda()
370
+ images = load_image(image_path[0], max_num=args.max_split_tile_num_single_image)
371
+ num_tile_this_image = len(images)
372
+ num_tile_per_image_tensor = torch.Tensor([num_tile_this_image]).long().cuda()
373
+ image_tensor = images.view(1, -1, 3, 448, 448).cuda()
374
+ image_prompts = image_prompts + '<IMAGE>' + '<pad>' * num_tile_this_image * num_token_per_tile + '</IMAGE>'
375
+
376
+ if args.fp16:
377
+ image_tensor = image_tensor.half()
378
+ elif args.bf16:
379
+ image_tensor = image_tensor.bfloat16()
380
+ else:
381
+ image_tensor = image_tensor.float()
382
+
383
+ images_input = {'num_tile_per_image_tensor': num_tile_per_image_tensor,
384
+ 'image_tensor': image_tensor}
385
+
386
+ prompts = ['<BOS>' + image_prompts + question[0] + '<sep>']
387
+
388
+ return prompts, images_input
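+ # With the demo settings, num_token_per_tile = 1024 * 0.5**2 = 256, so one
+ # image split into 3 tiles yields the prompt
+ # '<BOS>' + '<IMAGE>' + '<pad>' * 768 + '</IMAGE>' + question + '<sep>'.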
389
+
390
+
391
+ def _build_yuanvl_attention_mask_and_position_ids(tokenizer, tokens, images_input=None):
392
+ """Build the attention mask and postition ids for the input tokens."""
393
+
394
+ # Since we are not interested in loss-mask and reset attention/position
395
+ # is also False, eod_token is not used so it is safe to set it to None.
396
+
397
+ bos_token, image_start_token, image_end_token, pad_token, sep_token, eod_token = (tokenizer(tok)['input_ids'][0] for tok in ['<BOS>','<IMAGE>', '</IMAGE>', '<pad>', '<sep>', '<eod>'])
398
+ #eod_token = tokenizer("<eod>")['input_ids'][0]
399
+
400
+ attention_mask, position_ids, image_info = get_ltor_masks_and_position_ids_yuanvl_inference(
401
+ tokens,
402
+ bos_token,
403
+ image_start_token,
404
+ image_end_token,
405
+ eod_token,
406
+ pad_token,
407
+ images_input)
408
+
409
+
410
+ '''attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
411
+ data=tokens,
412
+ eod_token=None,
413
+ reset_position_ids=False,
414
+ reset_attention_mask=False,
415
+ eod_mask_loss=False)'''
416
+
417
+ return attention_mask, position_ids, image_info
418
+
419
+ def get_ltor_masks_and_position_ids_yuanvl_inference(data,
420
+ bos_token,
421
+ image_start_token,
422
+ image_end_token,
423
+ eod_token,
424
+ pad_token,
425
+ images_input,
426
+ reset_attention_mask=False):
427
+ """Build masks and position id for left to right model."""
428
+ # Extract batch size and sequence length.
429
+ micro_batch_size, seq_length = data.size()
430
+ assert micro_batch_size == 1, 'yuanvl support mbs = 1 only'
431
+
432
+ # Attention mask (lower triangular).
433
+ if reset_attention_mask:
434
+ att_mask_batch = micro_batch_size
435
+ else:
436
+ att_mask_batch = 1
437
+ attention_mask = torch.tril(torch.ones(
438
+ (att_mask_batch, seq_length, seq_length), device=data.device)).view(
439
+ att_mask_batch, 1, seq_length, seq_length)
440
+
441
+
442
+ # Position ids.
443
+ position_ids = torch.arange(seq_length, dtype=torch.long,
444
+ device=data.device)
445
+ position_ids = position_ids.unsqueeze(0).expand_as(data)
446
+ #input_pad = []
447
+ #image_info = {}
448
+
449
+ #import pdb
450
+ #pdb.set_trace()
451
+ #if torch.distributed.get_rank() == 0:
452
+
453
+ #pdb.set_trace()
454
+ if images_input is not None:
455
+ num_tile_per_image_tensor = images_input['num_tile_per_image_tensor']
456
+ images_tensor = images_input['image_tensor']
457
+ input_pad = []
458
+ image_info = {}
459
+ position_ids_use = torch.zeros(data.shape).to(position_ids)
460
+ for b in range(micro_batch_size):
461
+ bos_index = position_ids[b, data[b] == bos_token]
462
+ pad_index = position_ids[b, data[b] == pad_token]
463
+ image_start_index = position_ids[b, data[b] == image_start_token]
464
+ image_end_index = position_ids[b, data[b] == image_end_token]
465
+ #eod_index = position_ids[b, data[b] == eod_token]
466
+ #assert len(bos_index) == len(eod_index)
467
+ num_image = len(num_tile_per_image_tensor)
468
+
469
+ #num_tile = pad_index.shape[0] // clip_visual_size
470
+ #image_info['num_image'] = num_image
471
+ image_info['num_tile'] = num_tile_per_image_tensor
472
+ #image_info['bos_pos'] = bos_index.tolist()
473
+ image_info['image_start_pos'] = image_start_index.tolist()
474
+ #image_info['image_end_pos'] = image_end_index.tolist()
475
+
476
+ #for j in range(image_index.size()[0]):
477
+ # start_idx = image_index[j]
478
+ # diff = seq_length - start_idx
479
+ # position_ids_use[b][start_idx : ] = torch.arange(diff, dtype=torch.long,
480
+ # device=data.device)
481
+ start_idx = image_end_index[-1]
482
+ diff = seq_length - start_idx
483
+ position_ids_use[b][start_idx : ] = torch.arange(diff, dtype=torch.long,
484
+ device=data.device)
485
+ else:
486
+ position_ids = torch.arange(seq_length, dtype=torch.long,
487
+ device=data.device)
488
+ position_ids = position_ids.unsqueeze(0)#.expand_as(data)
489
+ position_ids_use = position_ids
490
+ image_info = None
491
+ #image_info['eod_pos'] = eod_index.tolist()
492
+ #for j in range(bos_index.size()[0]):
493
+ # start_idx = bos_index[j]
494
+ # end_idx = eod_index[j]
495
+ # input_pad = input_pad + [bos_token] + [pad_token] * clip_visual_size + data[b][start_idx + 1 : end_idx + 1].tolist()
496
+ #data_nopad = data[b][:eod_index[j]+1].view(1, -1)
497
+ #input_pad = input_pad + [pad_token]
498
+
499
+
500
+ # Position ids.
501
+ #position_ids = torch.arange(seq_length + clip_visual_size * num_image, dtype=torch.long,
502
+ #position_ids = torch.arange(seq_length, dtype=torch.long,
503
+ # device=data.device)
504
+ #position_ids = position_ids.unsqueeze(0)#.expand_as(data)
505
+
506
+
507
+
508
+ # Convert attention mask to binary:
509
+ attention_mask = (attention_mask < 0.5)
510
+
511
+ '''xattn_position_ids = torch.arange(seq_length, dtype=torch.long,
512
+ device=data.device)
513
+ xattn_position_ids = xattn_position_ids.unsqueeze(0).expand_as(data)
514
+
515
+ for b in range(micro_batch_size):
516
+
517
+ bos_index = xattn_position_ids[b, data[b] == bos_token]
518
+
519
+ num_image = len(bos_index)
520
+
521
+ xattn_mask = torch.zeros((micro_batch_size, seq_length, num_image * clip_visual_size), device = data.device).view(micro_batch_size, 1, seq_length, num_image * clip_visual_size)
522
+
523
+ for j in range(bos_index.size()[0]):
524
+ sidx = bos_index[j]
525
+
526
+ image_sidx = j * clip_visual_size
527
+ image_eidx = (j + 1) * clip_visual_size
528
+
529
+ #xattn_mask[b, 0, (sidx + 1) : , image_sidx : image_eidx] = 1
530
+ xattn_mask[b, 0, sidx : , image_sidx : image_eidx] = 1
531
+ #xattn_mask[b, 0, sidx : (eidx + 1), image_sidx : image_eidx] = 1
532
+
533
+ xattn_mask = (xattn_mask < 0.5)'''
534
+
535
+ return attention_mask, position_ids_use, image_info
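+ # Net effect: position ids before the last </IMAGE> token stay 0, while the
+ # tokens from that point onward are renumbered 0, 1, 2, ..., so text
+ # positions restart right after the image block (mbs = 1 only).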
536
+
537
+ tokenizer_loadpath = "/mnt/beegfs3/zhaoxudong/code/yuanvl_hf_40B_stage2_pcase4_12pp/"
538
+ model_loadpath = "/mnt/beegfs3/zhaoxudong/code/yuanvl_hf_40B_stage2_pcase4_12pp/"
539
+
540
+
541
+ # load the local model
542
+ model = AutoModel.from_pretrained(
543
+ model_loadpath,
544
+ torch_dtype=torch.bfloat16,
545
+ low_cpu_mem_usage=True,
546
+ use_flash_attn=False,
547
+ device_map="auto",
548
+ trust_remote_code=True).eval()
549
+
550
+
551
+ print("Creat model finish")
552
+
553
+ # load the local tokenizer
554
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_loadpath)
555
+
556
+
557
+ num_token_per_tile = int(args.clip_visual_size * args.downsample_ratio**2)
558
+
559
+ # demo 1
560
+ image_path = ['/mnt/beegfs3/zhaoxudong/code/image.jpeg']
561
+ question = ['Please describe the picture']
562
+ question = ['请描述这张图片的内容']
563
+
564
+ prompts, images_input = preprocess_input(args, num_token_per_tile, image_path, question)
565
+
566
+ input=tokenizer(prompts, return_tensors="pt")
567
+ input_ids = input['input_ids'].to("cuda")
568
+ pixel_values=images_input['image_tensor']
569
+
570
+ attention_mask, position_ids, image_info = _build_yuanvl_attention_mask_and_position_ids(
571
+ tokenizer, input_ids, images_input)
572
+
573
+ attention_mask = input['attention_mask'].to("cuda")
574
+
575
+ output = model.generate(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
576
+ print(tokenizer.decode(output[0]))
preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "crop_size": 448,
3
+ "do_center_crop": true,
4
+ "do_normalize": true,
5
+ "do_resize": true,
6
+ "image_processor_type": "CLIPImageProcessor",
7
+ "processor_class": "CLIPProcessor",
8
+ "tokenizer_class": "LlamaTokenizer",
9
+ "image_mean": [
10
+ 0.485,
11
+ 0.456,
12
+ 0.406
13
+ ],
14
+ "image_std": [
15
+ 0.229,
16
+ 0.224,
17
+ 0.225
18
+ ],
19
+ "resample": 3,
20
+ "size": 448
21
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,1086 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<s>",
4
+ "<eod>",
5
+ "<unk>",
6
+ "<sep>",
7
+ "<pad>",
8
+ "<mask>",
9
+ "<predict>",
10
+ "<FIM_SUFFIX>",
11
+ "<FIM_PREFIX>",
12
+ "<FIM_MIDDLE>",
13
+ "<commit_before>",
14
+ "<commit_msg>",
15
+ "<commit_after>",
16
+ "<jupyter_start>",
17
+ "<jupyter_text>",
18
+ "<jupyter_code>",
19
+ "<jupyter_output>",
20
+ "<empty_output>",
21
+ "<repo_name>",
22
+ "<file_sep>",
23
+ "<BOS>",
24
+ "<IMAGE>",
25
+ "</IMAGE>",
26
+ "<grounding>",
27
+ "<obj>",
28
+ "</obj>",
29
+ "<box>",
30
+ "</box>",
31
+ "<point>",
32
+ "</point>",
33
+ "<3dbox>",
34
+ "</3dbox>",
35
+ "<depth>",
36
+ "</depth>",
37
+ "s000",
38
+ "s001",
39
+ "s002",
40
+ "s003",
41
+ "s004",
42
+ "s005",
43
+ "s006",
44
+ "s007",
45
+ "s008",
46
+ "s009",
47
+ "s010",
48
+ "s011",
49
+ "s012",
50
+ "s013",
51
+ "s014",
52
+ "s015",
53
+ "s016",
54
+ "s017",
55
+ "s018",
56
+ "s019",
57
+ "s020",
58
+ "s021",
59
+ "s022",
60
+ "s023",
61
+ "s024",
62
+ "s025",
63
+ "s026",
64
+ "s027",
65
+ "s028",
66
+ "s029",
67
+ "s030",
68
+ "s031",
69
+ "s032",
70
+ "s033",
71
+ "s034",
72
+ "s035",
73
+ "s036",
74
+ "s037",
75
+ "s038",
76
+ "s039",
77
+ "s040",
78
+ "s041",
79
+ "s042",
80
+ "s043",
81
+ "s044",
82
+ "s045",
83
+ "s046",
84
+ "s047",
85
+ "s048",
86
+ "s049",
87
+ "s050",
88
+ "s051",
89
+ "s052",
90
+ "s053",
91
+ "s054",
92
+ "s055",
93
+ "s056",
94
+ "s057",
95
+ "s058",
96
+ "s059",
97
+ "s060",
98
+ "s061",
99
+ "s062",
100
+ "s063",
101
+ "s064",
102
+ "s065",
103
+ "s066",
104
+ "s067",
105
+ "s068",
106
+ "s069",
107
+ "s070",
108
+ "s071",
109
+ "s072",
110
+ "s073",
111
+ "s074",
112
+ "s075",
113
+ "s076",
114
+ "s077",
115
+ "s078",
116
+ "s079",
117
+ "s080",
118
+ "s081",
119
+ "s082",
120
+ "s083",
121
+ "s084",
122
+ "s085",
123
+ "s086",
124
+ "s087",
125
+ "s088",
126
+ "s089",
127
+ "s090",
128
+ "s091",
129
+ "s092",
130
+ "s093",
131
+ "s094",
132
+ "s095",
133
+ "s096",
134
+ "s097",
135
+ "s098",
136
+ "s099",
137
+ "s100",
138
+ "s101",
139
+ "s102",
140
+ "s103",
141
+ "s104",
142
+ "s105",
143
+ "s106",
144
+ "s107",
145
+ "s108",
146
+ "s109",
147
+ "s110",
148
+ "s111",
149
+ "s112",
150
+ "s113",
151
+ "s114",
152
+ "s115",
153
+ "s116",
154
+ "s117",
155
+ "s118",
156
+ "s119",
157
+ "s120",
158
+ "s121",
159
+ "s122",
160
+ "s123",
161
+ "s124",
162
+ "s125",
163
+ "s126",
164
+ "s127",
165
+ "s128",
166
+ "s129",
167
+ "s130",
168
+ "s131",
169
+ "s132",
170
+ "s133",
171
+ "s134",
172
+ "s135",
173
+ "s136",
174
+ "s137",
175
+ "s138",
176
+ "s139",
177
+ "s140",
178
+ "s141",
179
+ "s142",
180
+ "s143",
181
+ "s144",
182
+ "s145",
183
+ "s146",
184
+ "s147",
185
+ "s148",
186
+ "s149",
187
+ "s150",
188
+ "s151",
189
+ "s152",
190
+ "s153",
191
+ "s154",
192
+ "s155",
193
+ "s156",
194
+ "s157",
195
+ "s158",
196
+ "s159",
197
+ "s160",
198
+ "s161",
199
+ "s162",
200
+ "s163",
201
+ "s164",
202
+ "s165",
203
+ "s166",
204
+ "s167",
205
+ "s168",
206
+ "s169",
207
+ "s170",
208
+ "s171",
209
+ "s172",
210
+ "s173",
211
+ "s174",
212
+ "s175",
213
+ "s176",
214
+ "s177",
215
+ "s178",
216
+ "s179",
217
+ "s180",
218
+ "s181",
219
+ "s182",
220
+ "s183",
221
+ "s184",
222
+ "s185",
223
+ "s186",
224
+ "s187",
225
+ "s188",
226
+ "s189",
227
+ "s190",
228
+ "s191",
229
+ "s192",
230
+ "s193",
231
+ "s194",
232
+ "s195",
233
+ "s196",
234
+ "s197",
235
+ "s198",
236
+ "s199",
237
+ "s200",
238
+ "s201",
239
+ "s202",
240
+ "s203",
241
+ "s204",
242
+ "s205",
243
+ "s206",
244
+ "s207",
245
+ "s208",
246
+ "s209",
247
+ "s210",
248
+ "s211",
249
+ "s212",
250
+ "s213",
251
+ "s214",
252
+ "s215",
253
+ "s216",
254
+ "s217",
255
+ "s218",
256
+ "s219",
257
+ "s220",
258
+ "s221",
259
+ "s222",
260
+ "s223",
261
+ "s224",
262
+ "s225",
263
+ "s226",
264
+ "s227",
265
+ "s228",
266
+ "s229",
267
+ "s230",
268
+ "s231",
269
+ "s232",
270
+ "s233",
271
+ "s234",
272
+ "s235",
273
+ "s236",
274
+ "s237",
275
+ "s238",
276
+ "s239",
277
+ "s240",
278
+ "s241",
279
+ "s242",
280
+ "s243",
281
+ "s244",
282
+ "s245",
283
+ "s246",
284
+ "s247",
285
+ "s248",
286
+ "s249",
287
+ "s250",
288
+ "s251",
289
+ "s252",
290
+ "s253",
291
+ "s254",
292
+ "s255",
293
+ "s256",
294
+ "s257",
295
+ "s258",
296
+ "s259",
297
+ "s260",
298
+ "s261",
299
+ "s262",
300
+ "s263",
301
+ "s264",
302
+ "s265",
303
+ "s266",
304
+ "s267",
305
+ "s268",
306
+ "s269",
307
+ "s270",
308
+ "s271",
309
+ "s272",
310
+ "s273",
311
+ "s274",
312
+ "s275",
313
+ "s276",
314
+ "s277",
315
+ "s278",
316
+ "s279",
317
+ "s280",
318
+ "s281",
319
+ "s282",
320
+ "s283",
321
+ "s284",
322
+ "s285",
323
+ "s286",
324
+ "s287",
325
+ "s288",
326
+ "s289",
327
+ "s290",
328
+ "s291",
329
+ "s292",
330
+ "s293",
331
+ "s294",
332
+ "s295",
333
+ "s296",
334
+ "s297",
335
+ "s298",
336
+ "s299",
337
+ "s300",
338
+ "s301",
339
+ "s302",
340
+ "s303",
341
+ "s304",
342
+ "s305",
343
+ "s306",
344
+ "s307",
345
+ "s308",
346
+ "s309",
347
+ "s310",
348
+ "s311",
349
+ "s312",
350
+ "s313",
351
+ "s314",
352
+ "s315",
353
+ "s316",
354
+ "s317",
355
+ "s318",
356
+ "s319",
357
+ "s320",
358
+ "s321",
359
+ "s322",
360
+ "s323",
361
+ "s324",
362
+ "s325",
363
+ "s326",
364
+ "s327",
365
+ "s328",
366
+ "s329",
367
+ "s330",
368
+ "s331",
369
+ "s332",
370
+ "s333",
371
+ "s334",
372
+ "s335",
373
+ "s336",
374
+ "s337",
375
+ "s338",
376
+ "s339",
377
+ "s340",
378
+ "s341",
379
+ "s342",
380
+ "s343",
381
+ "s344",
382
+ "s345",
383
+ "s346",
384
+ "s347",
385
+ "s348",
386
+ "s349",
387
+ "s350",
388
+ "s351",
389
+ "s352",
390
+ "s353",
391
+ "s354",
392
+ "s355",
393
+ "s356",
394
+ "s357",
395
+ "s358",
396
+ "s359",
397
+ "s360",
398
+ "s361",
399
+ "s362",
400
+ "s363",
401
+ "s364",
402
+ "s365",
403
+ "s366",
404
+ "s367",
405
+ "s368",
406
+ "s369",
407
+ "s370",
408
+ "s371",
409
+ "s372",
410
+ "s373",
411
+ "s374",
412
+ "s375",
413
+ "s376",
414
+ "s377",
415
+ "s378",
416
+ "s379",
417
+ "s380",
418
+ "s381",
419
+ "s382",
420
+ "s383",
421
+ "s384",
422
+ "s385",
423
+ "s386",
424
+ "s387",
425
+ "s388",
426
+ "s389",
427
+ "s390",
428
+ "s391",
429
+ "s392",
430
+ "s393",
431
+ "s394",
432
+ "s395",
433
+ "s396",
434
+ "s397",
435
+ "s398",
436
+ "s399",
437
+ "s400",
438
+ "s401",
439
+ "s402",
440
+ "s403",
441
+ "s404",
442
+ "s405",
443
+ "s406",
444
+ "s407",
445
+ "s408",
446
+ "s409",
447
+ "s410",
448
+ "s411",
449
+ "s412",
450
+ "s413",
451
+ "s414",
452
+ "s415",
453
+ "s416",
454
+ "s417",
455
+ "s418",
456
+ "s419",
457
+ "s420",
458
+ "s421",
459
+ "s422",
460
+ "s423",
461
+ "s424",
462
+ "s425",
463
+ "s426",
464
+ "s427",
465
+ "s428",
466
+ "s429",
467
+ "s430",
468
+ "s431",
469
+ "s432",
470
+ "s433",
471
+ "s434",
472
+ "s435",
473
+ "s436",
474
+ "s437",
475
+ "s438",
476
+ "s439",
477
+ "s440",
478
+ "s441",
479
+ "s442",
480
+ "s443",
481
+ "s444",
482
+ "s445",
483
+ "s446",
484
+ "s447",
485
+ "s448",
486
+ "s449",
487
+ "s450",
488
+ "s451",
489
+ "s452",
490
+ "s453",
491
+ "s454",
492
+ "s455",
493
+ "s456",
494
+ "s457",
495
+ "s458",
496
+ "s459",
497
+ "s460",
498
+ "s461",
499
+ "s462",
500
+ "s463",
501
+ "s464",
502
+ "s465",
503
+ "s466",
504
+ "s467",
505
+ "s468",
506
+ "s469",
507
+ "s470",
508
+ "s471",
509
+ "s472",
510
+ "s473",
511
+ "s474",
512
+ "s475",
513
+ "s476",
514
+ "s477",
515
+ "s478",
516
+ "s479",
517
+ "s480",
518
+ "s481",
519
+ "s482",
520
+ "s483",
521
+ "s484",
522
+ "s485",
523
+ "s486",
524
+ "s487",
525
+ "s488",
526
+ "s489",
527
+ "s490",
528
+ "s491",
529
+ "s492",
530
+ "s493",
531
+ "s494",
532
+ "s495",
533
+ "s496",
534
+ "s497",
535
+ "s498",
536
+ "s499",
537
+ "s500",
538
+ "s501",
539
+ "s502",
540
+ "s503",
541
+ "s504",
542
+ "s505",
543
+ "s506",
544
+ "s507",
545
+ "s508",
546
+ "s509",
547
+ "s510",
548
+ "s511",
549
+ "s512",
550
+ "s513",
551
+ "s514",
552
+ "s515",
553
+ "s516",
554
+ "s517",
555
+ "s518",
556
+ "s519",
557
+ "s520",
558
+ "s521",
559
+ "s522",
560
+ "s523",
561
+ "s524",
562
+ "s525",
563
+ "s526",
564
+ "s527",
565
+ "s528",
566
+ "s529",
567
+ "s530",
568
+ "s531",
569
+ "s532",
570
+ "s533",
571
+ "s534",
572
+ "s535",
573
+ "s536",
574
+ "s537",
575
+ "s538",
576
+ "s539",
577
+ "s540",
578
+ "s541",
579
+ "s542",
580
+ "s543",
581
+ "s544",
582
+ "s545",
583
+ "s546",
584
+ "s547",
585
+ "s548",
586
+ "s549",
587
+ "s550",
588
+ "s551",
589
+ "s552",
590
+ "s553",
591
+ "s554",
592
+ "s555",
593
+ "s556",
594
+ "s557",
595
+ "s558",
596
+ "s559",
597
+ "s560",
598
+ "s561",
599
+ "s562",
600
+ "s563",
601
+ "s564",
602
+ "s565",
603
+ "s566",
604
+ "s567",
605
+ "s568",
606
+ "s569",
607
+ "s570",
608
+ "s571",
609
+ "s572",
610
+ "s573",
611
+ "s574",
612
+ "s575",
613
+ "s576",
614
+ "s577",
615
+ "s578",
616
+ "s579",
617
+ "s580",
618
+ "s581",
619
+ "s582",
620
+ "s583",
621
+ "s584",
622
+ "s585",
623
+ "s586",
624
+ "s587",
625
+ "s588",
626
+ "s589",
627
+ "s590",
628
+ "s591",
629
+ "s592",
630
+ "s593",
631
+ "s594",
632
+ "s595",
633
+ "s596",
634
+ "s597",
635
+ "s598",
636
+ "s599",
637
+ "s600",
638
+ "s601",
639
+ "s602",
640
+ "s603",
641
+ "s604",
642
+ "s605",
643
+ "s606",
644
+ "s607",
645
+ "s608",
646
+ "s609",
647
+ "s610",
648
+ "s611",
649
+ "s612",
650
+ "s613",
651
+ "s614",
652
+ "s615",
653
+ "s616",
654
+ "s617",
655
+ "s618",
656
+ "s619",
657
+ "s620",
658
+ "s621",
659
+ "s622",
660
+ "s623",
661
+ "s624",
662
+ "s625",
663
+ "s626",
664
+ "s627",
665
+ "s628",
666
+ "s629",
667
+ "s630",
668
+ "s631",
669
+ "s632",
670
+ "s633",
671
+ "s634",
672
+ "s635",
673
+ "s636",
674
+ "s637",
675
+ "s638",
676
+ "s639",
677
+ "s640",
678
+ "s641",
679
+ "s642",
680
+ "s643",
681
+ "s644",
682
+ "s645",
683
+ "s646",
684
+ "s647",
685
+ "s648",
686
+ "s649",
687
+ "s650",
688
+ "s651",
689
+ "s652",
690
+ "s653",
691
+ "s654",
692
+ "s655",
693
+ "s656",
694
+ "s657",
695
+ "s658",
696
+ "s659",
697
+ "s660",
698
+ "s661",
699
+ "s662",
700
+ "s663",
701
+ "s664",
702
+ "s665",
703
+ "s666",
704
+ "s667",
705
+ "s668",
706
+ "s669",
707
+ "s670",
708
+ "s671",
709
+ "s672",
710
+ "s673",
711
+ "s674",
712
+ "s675",
713
+ "s676",
714
+ "s677",
715
+ "s678",
716
+ "s679",
717
+ "s680",
718
+ "s681",
719
+ "s682",
720
+ "s683",
721
+ "s684",
722
+ "s685",
723
+ "s686",
724
+ "s687",
725
+ "s688",
726
+ "s689",
727
+ "s690",
728
+ "s691",
729
+ "s692",
730
+ "s693",
731
+ "s694",
732
+ "s695",
733
+ "s696",
734
+ "s697",
735
+ "s698",
736
+ "s699",
737
+ "s700",
738
+ "s701",
739
+ "s702",
740
+ "s703",
741
+ "s704",
742
+ "s705",
743
+ "s706",
744
+ "s707",
745
+ "s708",
746
+ "s709",
747
+ "s710",
748
+ "s711",
749
+ "s712",
750
+ "s713",
751
+ "s714",
752
+ "s715",
753
+ "s716",
754
+ "s717",
755
+ "s718",
756
+ "s719",
757
+ "s720",
758
+ "s721",
759
+ "s722",
760
+ "s723",
761
+ "s724",
762
+ "s725",
763
+ "s726",
764
+ "s727",
765
+ "s728",
766
+ "s729",
767
+ "s730",
768
+ "s731",
769
+ "s732",
770
+ "s733",
771
+ "s734",
772
+ "s735",
773
+ "s736",
774
+ "s737",
775
+ "s738",
776
+ "s739",
777
+ "s740",
778
+ "s741",
779
+ "s742",
780
+ "s743",
781
+ "s744",
782
+ "s745",
783
+ "s746",
784
+ "s747",
785
+ "s748",
786
+ "s749",
787
+ "s750",
788
+ "s751",
789
+ "s752",
790
+ "s753",
791
+ "s754",
792
+ "s755",
793
+ "s756",
794
+ "s757",
795
+ "s758",
796
+ "s759",
797
+ "s760",
798
+ "s761",
799
+ "s762",
800
+ "s763",
801
+ "s764",
802
+ "s765",
803
+ "s766",
804
+ "s767",
805
+ "s768",
806
+ "s769",
807
+ "s770",
808
+ "s771",
809
+ "s772",
810
+ "s773",
811
+ "s774",
812
+ "s775",
813
+ "s776",
814
+ "s777",
815
+ "s778",
816
+ "s779",
817
+ "s780",
818
+ "s781",
819
+ "s782",
820
+ "s783",
821
+ "s784",
822
+ "s785",
823
+ "s786",
824
+ "s787",
825
+ "s788",
826
+ "s789",
827
+ "s790",
828
+ "s791",
829
+ "s792",
830
+ "s793",
831
+ "s794",
832
+ "s795",
833
+ "s796",
834
+ "s797",
835
+ "s798",
836
+ "s799",
837
+ "s800",
838
+ "s801",
839
+ "s802",
840
+ "s803",
841
+ "s804",
842
+ "s805",
843
+ "s806",
844
+ "s807",
845
+ "s808",
846
+ "s809",
847
+ "s810",
848
+ "s811",
849
+ "s812",
850
+ "s813",
851
+ "s814",
852
+ "s815",
853
+ "s816",
854
+ "s817",
855
+ "s818",
856
+ "s819",
857
+ "s820",
858
+ "s821",
859
+ "s822",
860
+ "s823",
861
+ "s824",
862
+ "s825",
863
+ "s826",
864
+ "s827",
865
+ "s828",
866
+ "s829",
867
+ "s830",
868
+ "s831",
869
+ "s832",
870
+ "s833",
871
+ "s834",
872
+ "s835",
873
+ "s836",
874
+ "s837",
875
+ "s838",
876
+ "s839",
877
+ "s840",
878
+ "s841",
879
+ "s842",
880
+ "s843",
881
+ "s844",
882
+ "s845",
883
+ "s846",
884
+ "s847",
885
+ "s848",
886
+ "s849",
887
+ "s850",
888
+ "s851",
889
+ "s852",
890
+ "s853",
891
+ "s854",
892
+ "s855",
893
+ "s856",
894
+ "s857",
895
+ "s858",
896
+ "s859",
897
+ "s860",
898
+ "s861",
899
+ "s862",
900
+ "s863",
901
+ "s864",
902
+ "s865",
903
+ "s866",
904
+ "s867",
905
+ "s868",
906
+ "s869",
907
+ "s870",
908
+ "s871",
909
+ "s872",
910
+ "s873",
911
+ "s874",
912
+ "s875",
913
+ "s876",
914
+ "s877",
915
+ "s878",
916
+ "s879",
917
+ "s880",
918
+ "s881",
919
+ "s882",
920
+ "s883",
921
+ "s884",
922
+ "s885",
923
+ "s886",
924
+ "s887",
925
+ "s888",
926
+ "s889",
927
+ "s890",
928
+ "s891",
929
+ "s892",
930
+ "s893",
931
+ "s894",
932
+ "s895",
933
+ "s896",
934
+ "s897",
935
+ "s898",
936
+ "s899",
937
+ "s900",
938
+ "s901",
939
+ "s902",
940
+ "s903",
941
+ "s904",
942
+ "s905",
943
+ "s906",
944
+ "s907",
945
+ "s908",
946
+ "s909",
947
+ "s910",
948
+ "s911",
949
+ "s912",
950
+ "s913",
951
+ "s914",
952
+ "s915",
953
+ "s916",
954
+ "s917",
955
+ "s918",
956
+ "s919",
957
+ "s920",
958
+ "s921",
959
+ "s922",
960
+ "s923",
961
+ "s924",
962
+ "s925",
963
+ "s926",
964
+ "s927",
965
+ "s928",
966
+ "s929",
967
+ "s930",
968
+ "s931",
969
+ "s932",
970
+ "s933",
971
+ "s934",
972
+ "s935",
973
+ "s936",
974
+ "s937",
975
+ "s938",
976
+ "s939",
977
+ "s940",
978
+ "s941",
979
+ "s942",
980
+ "s943",
981
+ "s944",
982
+ "s945",
983
+ "s946",
984
+ "s947",
985
+ "s948",
986
+ "s949",
987
+ "s950",
988
+ "s951",
989
+ "s952",
990
+ "s953",
991
+ "s954",
992
+ "s955",
993
+ "s956",
994
+ "s957",
995
+ "s958",
996
+ "s959",
997
+ "s960",
998
+ "s961",
999
+ "s962",
1000
+ "s963",
1001
+ "s964",
1002
+ "s965",
1003
+ "s966",
1004
+ "s967",
1005
+ "s968",
1006
+ "s969",
1007
+ "s970",
1008
+ "s971",
1009
+ "s972",
1010
+ "s973",
1011
+ "s974",
1012
+ "s975",
1013
+ "s976",
1014
+ "s977",
1015
+ "s978",
1016
+ "s979",
1017
+ "s980",
1018
+ "s981",
1019
+ "s982",
1020
+ "s983",
1021
+ "s984",
1022
+ "s985",
1023
+ "s986",
1024
+ "s987",
1025
+ "s988",
1026
+ "s989",
1027
+ "s990",
1028
+ "s991",
1029
+ "s992",
1030
+ "s993",
1031
+ "s994",
1032
+ "s995",
1033
+ "s996",
1034
+ "s997",
1035
+ "s998",
1036
+ "s999",
1037
+ "<eop>",
1038
+ "<eog>",
1039
+ "<|begin_of_sentence|>",
1040
+ "<|end_of_sentence|>",
1041
+ "<|User|>",
1042
+ "<|Assistant|>",
1043
+ "<think>",
1044
+ "</think>",
1045
+ "<search_result>",
1046
+ "</search_result>",
1047
+ "<search_query>",
1048
+ "</search_query>",
1049
+ "<code_query>",
1050
+ "</code_query>",
1051
+ "<code_result>",
1052
+ "</code_result>",
1053
+ "<infer>",
1054
+ "</infer>",
1055
+ "<inferresult>",
1056
+ "</inferresult>",
1057
+ "<tool_calls>",
1058
+ "</tool_calls>",
1059
+ "<tool_response>",
1060
+ "</tool_response>",
1061
+ "<final_answer>",
1062
+ "</final_answer>"
1063
+ ],
1064
+ "bos_token": {
1065
+ "content": "<s>",
1066
+ "lstrip": false,
1067
+ "normalized": false,
1068
+ "rstrip": false,
1069
+ "single_word": false
1070
+ },
1071
+ "eos_token": {
1072
+ "content": "<eod>",
1073
+ "lstrip": false,
1074
+ "normalized": false,
1075
+ "rstrip": false,
1076
+ "single_word": false
1077
+ },
1078
+ "pad_token": "<eod>",
1079
+ "unk_token": {
1080
+ "content": "<unk>",
1081
+ "lstrip": false,
1082
+ "normalized": false,
1083
+ "rstrip": false,
1084
+ "single_word": false
1085
+ }
1086
+ }
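
The map above pins <s> / <eod> / <unk> as the bos/eos/unk tokens, reuses <eod> as the pad token, and registers the long additional_special_tokens block (the s000..s999 tokens plus the chat, tool and thinking markers). A minimal sketch of how these entries surface after loading; the local path "./ckpt" is only a placeholder for wherever this commit is checked out:

from transformers import AutoTokenizer

# Placeholder path: point it at a local checkout of this repository.
tok = AutoTokenizer.from_pretrained("./ckpt")

print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <s> <eod> <eod> <unk>

# Registered special tokens should survive as single ids rather than being
# split by the underlying SentencePiece model.
for t in ["<think>", "<tool_calls>", "s042"]:
    ids = tok.encode(t, add_special_tokens=False)
    print(t, ids, tok.convert_ids_to_tokens(ids))
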
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36f79e0c70f73cdd2a8dd0fbe7bfe290da158eea746778d289e4ad76c8b383d9
3
+ size 2155861
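
tokenizer.model is stored as a Git LFS pointer, so the three added lines carry only the spec version, the SHA-256 of the real SentencePiece model, and its size in bytes (2,155,861). A small standard-library check that the actual blob was fetched (for example after `git lfs pull`); the path is again a placeholder:

import hashlib, os

path = "./ckpt/tokenizer.model"  # placeholder path to the pulled file
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
print(os.path.getsize(path), digest)
# Both values should match the size and oid recorded in the pointer above;
# if the file is only ~130 bytes of text, the LFS objects were never fetched.
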
tokenizer_config.json ADDED
@@ -0,0 +1,1081 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": false,
5
+ "added_tokens_decoder": {},
6
+ "additional_special_tokens": [
7
+ "<s>",
8
+ "<eod>",
9
+ "<unk>",
10
+ "<sep>",
11
+ "<pad>",
12
+ "<mask>",
13
+ "<predict>",
14
+ "<FIM_SUFFIX>",
15
+ "<FIM_PREFIX>",
16
+ "<FIM_MIDDLE>",
17
+ "<commit_before>",
18
+ "<commit_msg>",
19
+ "<commit_after>",
20
+ "<jupyter_start>",
21
+ "<jupyter_text>",
22
+ "<jupyter_code>",
23
+ "<jupyter_output>",
24
+ "<empty_output>",
25
+ "<repo_name>",
26
+ "<file_sep>",
27
+ "<BOS>",
28
+ "<IMAGE>",
29
+ "</IMAGE>",
30
+ "<grounding>",
31
+ "<obj>",
32
+ "</obj>",
33
+ "<box>",
34
+ "</box>",
35
+ "<point>",
36
+ "</point>",
37
+ "<3dbox>",
38
+ "</3dbox>",
39
+ "<depth>",
40
+ "</depth>",
41
+ "s000",
42
+ "s001",
43
+ "s002",
44
+ "s003",
45
+ "s004",
46
+ "s005",
47
+ "s006",
48
+ "s007",
49
+ "s008",
50
+ "s009",
51
+ "s010",
52
+ "s011",
53
+ "s012",
54
+ "s013",
55
+ "s014",
56
+ "s015",
57
+ "s016",
58
+ "s017",
59
+ "s018",
60
+ "s019",
61
+ "s020",
62
+ "s021",
63
+ "s022",
64
+ "s023",
65
+ "s024",
66
+ "s025",
67
+ "s026",
68
+ "s027",
69
+ "s028",
70
+ "s029",
71
+ "s030",
72
+ "s031",
73
+ "s032",
74
+ "s033",
75
+ "s034",
76
+ "s035",
77
+ "s036",
78
+ "s037",
79
+ "s038",
80
+ "s039",
81
+ "s040",
82
+ "s041",
83
+ "s042",
84
+ "s043",
85
+ "s044",
86
+ "s045",
87
+ "s046",
88
+ "s047",
89
+ "s048",
90
+ "s049",
91
+ "s050",
92
+ "s051",
93
+ "s052",
94
+ "s053",
95
+ "s054",
96
+ "s055",
97
+ "s056",
98
+ "s057",
99
+ "s058",
100
+ "s059",
101
+ "s060",
102
+ "s061",
103
+ "s062",
104
+ "s063",
105
+ "s064",
106
+ "s065",
107
+ "s066",
108
+ "s067",
109
+ "s068",
110
+ "s069",
111
+ "s070",
112
+ "s071",
113
+ "s072",
114
+ "s073",
115
+ "s074",
116
+ "s075",
117
+ "s076",
118
+ "s077",
119
+ "s078",
120
+ "s079",
121
+ "s080",
122
+ "s081",
123
+ "s082",
124
+ "s083",
125
+ "s084",
126
+ "s085",
127
+ "s086",
128
+ "s087",
129
+ "s088",
130
+ "s089",
131
+ "s090",
132
+ "s091",
133
+ "s092",
134
+ "s093",
135
+ "s094",
136
+ "s095",
137
+ "s096",
138
+ "s097",
139
+ "s098",
140
+ "s099",
141
+ "s100",
142
+ "s101",
143
+ "s102",
144
+ "s103",
145
+ "s104",
146
+ "s105",
147
+ "s106",
148
+ "s107",
149
+ "s108",
150
+ "s109",
151
+ "s110",
152
+ "s111",
153
+ "s112",
154
+ "s113",
155
+ "s114",
156
+ "s115",
157
+ "s116",
158
+ "s117",
159
+ "s118",
160
+ "s119",
161
+ "s120",
162
+ "s121",
163
+ "s122",
164
+ "s123",
165
+ "s124",
166
+ "s125",
167
+ "s126",
168
+ "s127",
169
+ "s128",
170
+ "s129",
171
+ "s130",
172
+ "s131",
173
+ "s132",
174
+ "s133",
175
+ "s134",
176
+ "s135",
177
+ "s136",
178
+ "s137",
179
+ "s138",
180
+ "s139",
181
+ "s140",
182
+ "s141",
183
+ "s142",
184
+ "s143",
185
+ "s144",
186
+ "s145",
187
+ "s146",
188
+ "s147",
189
+ "s148",
190
+ "s149",
191
+ "s150",
192
+ "s151",
193
+ "s152",
194
+ "s153",
195
+ "s154",
196
+ "s155",
197
+ "s156",
198
+ "s157",
199
+ "s158",
200
+ "s159",
201
+ "s160",
202
+ "s161",
203
+ "s162",
204
+ "s163",
205
+ "s164",
206
+ "s165",
207
+ "s166",
208
+ "s167",
209
+ "s168",
210
+ "s169",
211
+ "s170",
212
+ "s171",
213
+ "s172",
214
+ "s173",
215
+ "s174",
216
+ "s175",
217
+ "s176",
218
+ "s177",
219
+ "s178",
220
+ "s179",
221
+ "s180",
222
+ "s181",
223
+ "s182",
224
+ "s183",
225
+ "s184",
226
+ "s185",
227
+ "s186",
228
+ "s187",
229
+ "s188",
230
+ "s189",
231
+ "s190",
232
+ "s191",
233
+ "s192",
234
+ "s193",
235
+ "s194",
236
+ "s195",
237
+ "s196",
238
+ "s197",
239
+ "s198",
240
+ "s199",
241
+ "s200",
242
+ "s201",
243
+ "s202",
244
+ "s203",
245
+ "s204",
246
+ "s205",
247
+ "s206",
248
+ "s207",
249
+ "s208",
250
+ "s209",
251
+ "s210",
252
+ "s211",
253
+ "s212",
254
+ "s213",
255
+ "s214",
256
+ "s215",
257
+ "s216",
258
+ "s217",
259
+ "s218",
260
+ "s219",
261
+ "s220",
262
+ "s221",
263
+ "s222",
264
+ "s223",
265
+ "s224",
266
+ "s225",
267
+ "s226",
268
+ "s227",
269
+ "s228",
270
+ "s229",
271
+ "s230",
272
+ "s231",
273
+ "s232",
274
+ "s233",
275
+ "s234",
276
+ "s235",
277
+ "s236",
278
+ "s237",
279
+ "s238",
280
+ "s239",
281
+ "s240",
282
+ "s241",
283
+ "s242",
284
+ "s243",
285
+ "s244",
286
+ "s245",
287
+ "s246",
288
+ "s247",
289
+ "s248",
290
+ "s249",
291
+ "s250",
292
+ "s251",
293
+ "s252",
294
+ "s253",
295
+ "s254",
296
+ "s255",
297
+ "s256",
298
+ "s257",
299
+ "s258",
300
+ "s259",
301
+ "s260",
302
+ "s261",
303
+ "s262",
304
+ "s263",
305
+ "s264",
306
+ "s265",
307
+ "s266",
308
+ "s267",
309
+ "s268",
310
+ "s269",
311
+ "s270",
312
+ "s271",
313
+ "s272",
314
+ "s273",
315
+ "s274",
316
+ "s275",
317
+ "s276",
318
+ "s277",
319
+ "s278",
320
+ "s279",
321
+ "s280",
322
+ "s281",
323
+ "s282",
324
+ "s283",
325
+ "s284",
326
+ "s285",
327
+ "s286",
328
+ "s287",
329
+ "s288",
330
+ "s289",
331
+ "s290",
332
+ "s291",
333
+ "s292",
334
+ "s293",
335
+ "s294",
336
+ "s295",
337
+ "s296",
338
+ "s297",
339
+ "s298",
340
+ "s299",
341
+ "s300",
342
+ "s301",
343
+ "s302",
344
+ "s303",
345
+ "s304",
346
+ "s305",
347
+ "s306",
348
+ "s307",
349
+ "s308",
350
+ "s309",
351
+ "s310",
352
+ "s311",
353
+ "s312",
354
+ "s313",
355
+ "s314",
356
+ "s315",
357
+ "s316",
358
+ "s317",
359
+ "s318",
360
+ "s319",
361
+ "s320",
362
+ "s321",
363
+ "s322",
364
+ "s323",
365
+ "s324",
366
+ "s325",
367
+ "s326",
368
+ "s327",
369
+ "s328",
370
+ "s329",
371
+ "s330",
372
+ "s331",
373
+ "s332",
374
+ "s333",
375
+ "s334",
376
+ "s335",
377
+ "s336",
378
+ "s337",
379
+ "s338",
380
+ "s339",
381
+ "s340",
382
+ "s341",
383
+ "s342",
384
+ "s343",
385
+ "s344",
386
+ "s345",
387
+ "s346",
388
+ "s347",
389
+ "s348",
390
+ "s349",
391
+ "s350",
392
+ "s351",
393
+ "s352",
394
+ "s353",
395
+ "s354",
396
+ "s355",
397
+ "s356",
398
+ "s357",
399
+ "s358",
400
+ "s359",
401
+ "s360",
402
+ "s361",
403
+ "s362",
404
+ "s363",
405
+ "s364",
406
+ "s365",
407
+ "s366",
408
+ "s367",
409
+ "s368",
410
+ "s369",
411
+ "s370",
412
+ "s371",
413
+ "s372",
414
+ "s373",
415
+ "s374",
416
+ "s375",
417
+ "s376",
418
+ "s377",
419
+ "s378",
420
+ "s379",
421
+ "s380",
422
+ "s381",
423
+ "s382",
424
+ "s383",
425
+ "s384",
426
+ "s385",
427
+ "s386",
428
+ "s387",
429
+ "s388",
430
+ "s389",
431
+ "s390",
432
+ "s391",
433
+ "s392",
434
+ "s393",
435
+ "s394",
436
+ "s395",
437
+ "s396",
438
+ "s397",
439
+ "s398",
440
+ "s399",
441
+ "s400",
442
+ "s401",
443
+ "s402",
444
+ "s403",
445
+ "s404",
446
+ "s405",
447
+ "s406",
448
+ "s407",
449
+ "s408",
450
+ "s409",
451
+ "s410",
452
+ "s411",
453
+ "s412",
454
+ "s413",
455
+ "s414",
456
+ "s415",
457
+ "s416",
458
+ "s417",
459
+ "s418",
460
+ "s419",
461
+ "s420",
462
+ "s421",
463
+ "s422",
464
+ "s423",
465
+ "s424",
466
+ "s425",
467
+ "s426",
468
+ "s427",
469
+ "s428",
470
+ "s429",
471
+ "s430",
472
+ "s431",
473
+ "s432",
474
+ "s433",
475
+ "s434",
476
+ "s435",
477
+ "s436",
478
+ "s437",
479
+ "s438",
480
+ "s439",
481
+ "s440",
482
+ "s441",
483
+ "s442",
484
+ "s443",
485
+ "s444",
486
+ "s445",
487
+ "s446",
488
+ "s447",
489
+ "s448",
490
+ "s449",
491
+ "s450",
492
+ "s451",
493
+ "s452",
494
+ "s453",
495
+ "s454",
496
+ "s455",
497
+ "s456",
498
+ "s457",
499
+ "s458",
500
+ "s459",
501
+ "s460",
502
+ "s461",
503
+ "s462",
504
+ "s463",
505
+ "s464",
506
+ "s465",
507
+ "s466",
508
+ "s467",
509
+ "s468",
510
+ "s469",
511
+ "s470",
512
+ "s471",
513
+ "s472",
514
+ "s473",
515
+ "s474",
516
+ "s475",
517
+ "s476",
518
+ "s477",
519
+ "s478",
520
+ "s479",
521
+ "s480",
522
+ "s481",
523
+ "s482",
524
+ "s483",
525
+ "s484",
526
+ "s485",
527
+ "s486",
528
+ "s487",
529
+ "s488",
530
+ "s489",
531
+ "s490",
532
+ "s491",
533
+ "s492",
534
+ "s493",
535
+ "s494",
536
+ "s495",
537
+ "s496",
538
+ "s497",
539
+ "s498",
540
+ "s499",
541
+ "s500",
542
+ "s501",
543
+ "s502",
544
+ "s503",
545
+ "s504",
546
+ "s505",
547
+ "s506",
548
+ "s507",
549
+ "s508",
550
+ "s509",
551
+ "s510",
552
+ "s511",
553
+ "s512",
554
+ "s513",
555
+ "s514",
556
+ "s515",
557
+ "s516",
558
+ "s517",
559
+ "s518",
560
+ "s519",
561
+ "s520",
562
+ "s521",
563
+ "s522",
564
+ "s523",
565
+ "s524",
566
+ "s525",
567
+ "s526",
568
+ "s527",
569
+ "s528",
570
+ "s529",
571
+ "s530",
572
+ "s531",
573
+ "s532",
574
+ "s533",
575
+ "s534",
576
+ "s535",
577
+ "s536",
578
+ "s537",
579
+ "s538",
580
+ "s539",
581
+ "s540",
582
+ "s541",
583
+ "s542",
584
+ "s543",
585
+ "s544",
586
+ "s545",
587
+ "s546",
588
+ "s547",
589
+ "s548",
590
+ "s549",
591
+ "s550",
592
+ "s551",
593
+ "s552",
594
+ "s553",
595
+ "s554",
596
+ "s555",
597
+ "s556",
598
+ "s557",
599
+ "s558",
600
+ "s559",
601
+ "s560",
602
+ "s561",
603
+ "s562",
604
+ "s563",
605
+ "s564",
606
+ "s565",
607
+ "s566",
608
+ "s567",
609
+ "s568",
610
+ "s569",
611
+ "s570",
612
+ "s571",
613
+ "s572",
614
+ "s573",
615
+ "s574",
616
+ "s575",
617
+ "s576",
618
+ "s577",
619
+ "s578",
620
+ "s579",
621
+ "s580",
622
+ "s581",
623
+ "s582",
624
+ "s583",
625
+ "s584",
626
+ "s585",
627
+ "s586",
628
+ "s587",
629
+ "s588",
630
+ "s589",
631
+ "s590",
632
+ "s591",
633
+ "s592",
634
+ "s593",
635
+ "s594",
636
+ "s595",
637
+ "s596",
638
+ "s597",
639
+ "s598",
640
+ "s599",
641
+ "s600",
642
+ "s601",
643
+ "s602",
644
+ "s603",
645
+ "s604",
646
+ "s605",
647
+ "s606",
648
+ "s607",
649
+ "s608",
650
+ "s609",
651
+ "s610",
652
+ "s611",
653
+ "s612",
654
+ "s613",
655
+ "s614",
656
+ "s615",
657
+ "s616",
658
+ "s617",
659
+ "s618",
660
+ "s619",
661
+ "s620",
662
+ "s621",
663
+ "s622",
664
+ "s623",
665
+ "s624",
666
+ "s625",
667
+ "s626",
668
+ "s627",
669
+ "s628",
670
+ "s629",
671
+ "s630",
672
+ "s631",
673
+ "s632",
674
+ "s633",
675
+ "s634",
676
+ "s635",
677
+ "s636",
678
+ "s637",
679
+ "s638",
680
+ "s639",
681
+ "s640",
682
+ "s641",
683
+ "s642",
684
+ "s643",
685
+ "s644",
686
+ "s645",
687
+ "s646",
688
+ "s647",
689
+ "s648",
690
+ "s649",
691
+ "s650",
692
+ "s651",
693
+ "s652",
694
+ "s653",
695
+ "s654",
696
+ "s655",
697
+ "s656",
698
+ "s657",
699
+ "s658",
700
+ "s659",
701
+ "s660",
702
+ "s661",
703
+ "s662",
704
+ "s663",
705
+ "s664",
706
+ "s665",
707
+ "s666",
708
+ "s667",
709
+ "s668",
710
+ "s669",
711
+ "s670",
712
+ "s671",
713
+ "s672",
714
+ "s673",
715
+ "s674",
716
+ "s675",
717
+ "s676",
718
+ "s677",
719
+ "s678",
720
+ "s679",
721
+ "s680",
722
+ "s681",
723
+ "s682",
724
+ "s683",
725
+ "s684",
726
+ "s685",
727
+ "s686",
728
+ "s687",
729
+ "s688",
730
+ "s689",
731
+ "s690",
732
+ "s691",
733
+ "s692",
734
+ "s693",
735
+ "s694",
736
+ "s695",
737
+ "s696",
738
+ "s697",
739
+ "s698",
740
+ "s699",
741
+ "s700",
742
+ "s701",
743
+ "s702",
744
+ "s703",
745
+ "s704",
746
+ "s705",
747
+ "s706",
748
+ "s707",
749
+ "s708",
750
+ "s709",
751
+ "s710",
752
+ "s711",
753
+ "s712",
754
+ "s713",
755
+ "s714",
756
+ "s715",
757
+ "s716",
758
+ "s717",
759
+ "s718",
760
+ "s719",
761
+ "s720",
762
+ "s721",
763
+ "s722",
764
+ "s723",
765
+ "s724",
766
+ "s725",
767
+ "s726",
768
+ "s727",
769
+ "s728",
770
+ "s729",
771
+ "s730",
772
+ "s731",
773
+ "s732",
774
+ "s733",
775
+ "s734",
776
+ "s735",
777
+ "s736",
778
+ "s737",
779
+ "s738",
780
+ "s739",
781
+ "s740",
782
+ "s741",
783
+ "s742",
784
+ "s743",
785
+ "s744",
786
+ "s745",
787
+ "s746",
788
+ "s747",
789
+ "s748",
790
+ "s749",
791
+ "s750",
792
+ "s751",
793
+ "s752",
794
+ "s753",
795
+ "s754",
796
+ "s755",
797
+ "s756",
798
+ "s757",
799
+ "s758",
800
+ "s759",
801
+ "s760",
802
+ "s761",
803
+ "s762",
804
+ "s763",
805
+ "s764",
806
+ "s765",
807
+ "s766",
808
+ "s767",
809
+ "s768",
810
+ "s769",
811
+ "s770",
812
+ "s771",
813
+ "s772",
814
+ "s773",
815
+ "s774",
816
+ "s775",
817
+ "s776",
818
+ "s777",
819
+ "s778",
820
+ "s779",
821
+ "s780",
822
+ "s781",
823
+ "s782",
824
+ "s783",
825
+ "s784",
826
+ "s785",
827
+ "s786",
828
+ "s787",
829
+ "s788",
830
+ "s789",
831
+ "s790",
832
+ "s791",
833
+ "s792",
834
+ "s793",
835
+ "s794",
836
+ "s795",
837
+ "s796",
838
+ "s797",
839
+ "s798",
840
+ "s799",
841
+ "s800",
842
+ "s801",
843
+ "s802",
844
+ "s803",
845
+ "s804",
846
+ "s805",
847
+ "s806",
848
+ "s807",
849
+ "s808",
850
+ "s809",
851
+ "s810",
852
+ "s811",
853
+ "s812",
854
+ "s813",
855
+ "s814",
856
+ "s815",
857
+ "s816",
858
+ "s817",
859
+ "s818",
860
+ "s819",
861
+ "s820",
862
+ "s821",
863
+ "s822",
864
+ "s823",
865
+ "s824",
866
+ "s825",
867
+ "s826",
868
+ "s827",
869
+ "s828",
870
+ "s829",
871
+ "s830",
872
+ "s831",
873
+ "s832",
874
+ "s833",
875
+ "s834",
876
+ "s835",
877
+ "s836",
878
+ "s837",
879
+ "s838",
880
+ "s839",
881
+ "s840",
882
+ "s841",
883
+ "s842",
884
+ "s843",
885
+ "s844",
886
+ "s845",
887
+ "s846",
888
+ "s847",
889
+ "s848",
890
+ "s849",
891
+ "s850",
892
+ "s851",
893
+ "s852",
894
+ "s853",
895
+ "s854",
896
+ "s855",
897
+ "s856",
898
+ "s857",
899
+ "s858",
900
+ "s859",
901
+ "s860",
902
+ "s861",
903
+ "s862",
904
+ "s863",
905
+ "s864",
906
+ "s865",
907
+ "s866",
908
+ "s867",
909
+ "s868",
910
+ "s869",
911
+ "s870",
912
+ "s871",
913
+ "s872",
914
+ "s873",
915
+ "s874",
916
+ "s875",
917
+ "s876",
918
+ "s877",
919
+ "s878",
920
+ "s879",
921
+ "s880",
922
+ "s881",
923
+ "s882",
924
+ "s883",
925
+ "s884",
926
+ "s885",
927
+ "s886",
928
+ "s887",
929
+ "s888",
930
+ "s889",
931
+ "s890",
932
+ "s891",
933
+ "s892",
934
+ "s893",
935
+ "s894",
936
+ "s895",
937
+ "s896",
938
+ "s897",
939
+ "s898",
940
+ "s899",
941
+ "s900",
942
+ "s901",
943
+ "s902",
944
+ "s903",
945
+ "s904",
946
+ "s905",
947
+ "s906",
948
+ "s907",
949
+ "s908",
950
+ "s909",
951
+ "s910",
952
+ "s911",
953
+ "s912",
954
+ "s913",
955
+ "s914",
956
+ "s915",
957
+ "s916",
958
+ "s917",
959
+ "s918",
960
+ "s919",
961
+ "s920",
962
+ "s921",
963
+ "s922",
964
+ "s923",
965
+ "s924",
966
+ "s925",
967
+ "s926",
968
+ "s927",
969
+ "s928",
970
+ "s929",
971
+ "s930",
972
+ "s931",
973
+ "s932",
974
+ "s933",
975
+ "s934",
976
+ "s935",
977
+ "s936",
978
+ "s937",
979
+ "s938",
980
+ "s939",
981
+ "s940",
982
+ "s941",
983
+ "s942",
984
+ "s943",
985
+ "s944",
986
+ "s945",
987
+ "s946",
988
+ "s947",
989
+ "s948",
990
+ "s949",
991
+ "s950",
992
+ "s951",
993
+ "s952",
994
+ "s953",
995
+ "s954",
996
+ "s955",
997
+ "s956",
998
+ "s957",
999
+ "s958",
1000
+ "s959",
1001
+ "s960",
1002
+ "s961",
1003
+ "s962",
1004
+ "s963",
1005
+ "s964",
1006
+ "s965",
1007
+ "s966",
1008
+ "s967",
1009
+ "s968",
1010
+ "s969",
1011
+ "s970",
1012
+ "s971",
1013
+ "s972",
1014
+ "s973",
1015
+ "s974",
1016
+ "s975",
1017
+ "s976",
1018
+ "s977",
1019
+ "s978",
1020
+ "s979",
1021
+ "s980",
1022
+ "s981",
1023
+ "s982",
1024
+ "s983",
1025
+ "s984",
1026
+ "s985",
1027
+ "s986",
1028
+ "s987",
1029
+ "s988",
1030
+ "s989",
1031
+ "s990",
1032
+ "s991",
1033
+ "s992",
1034
+ "s993",
1035
+ "s994",
1036
+ "s995",
1037
+ "s996",
1038
+ "s997",
1039
+ "s998",
1040
+ "s999",
1041
+ "<eop>",
1042
+ "<eog>",
1043
+ "<|begin_of_sentence|>",
1044
+ "<|end_of_sentence|>",
1045
+ "<|User|>",
1046
+ "<|Assistant|>",
1047
+ "<think>",
1048
+ "</think>",
1049
+ "<search_result>",
1050
+ "</search_result>",
1051
+ "<search_query>",
1052
+ "</search_query>",
1053
+ "<code_query>",
1054
+ "</code_query>",
1055
+ "<code_result>",
1056
+ "</code_result>",
1057
+ "<infer>",
1058
+ "</infer>",
1059
+ "<inferresult>",
1060
+ "</inferresult>",
1061
+ "<tool_calls>",
1062
+ "</tool_calls>",
1063
+ "<tool_response>",
1064
+ "</tool_response>",
1065
+ "<final_answer>",
1066
+ "</final_answer>"
1067
+ ],
1068
+ "bos_token": "<s>",
1069
+ "pad_token": null,
1070
+ "clean_up_tokenization_spaces": true,
1071
+ "eos_token": "<eod>",
1072
+ "legacy": true,
1073
+ "model_max_length": 1000000000000000019884624838656,
1074
+ "sp_model_kwargs": {},
1075
+ "spaces_between_special_tokens": false,
1076
+ "tokenizer_class": "LlamaTokenizer",
1077
+ "unk_token": "<unk>",
1078
+ "use_default_system_prompt": false,
1079
+ "max_token_id": 136000,
1080
+ "chat_template": "{% for message in messages if message.role == 'user' and message.content is iterable and message.content is not string %}\n {% for item in message.content if item.type == 'image' %}\n {{- '<image>' -}}\n {% endfor %}\n{% endfor %}\n\n{{- '<|begin_of_sentence|>' -}}\n\n{%- set system_message = namespace(value=none) -%}\n{%- for message in messages if message.role == 'system' -%}\n {%- set system_message.value = message.content -%}\n{%- endfor -%}\n{%- if system_message.value -%}\n {{- system_message.value -}}\n{%- endif -%}\n\n{%- for message in messages -%}\n {%- if message.role == \"user\" -%}\n {{- '<|User|>' -}}\n {%- if message.content is string -%}\n {{- message.content -}}\n {%- elif message.content is iterable and message.content is not string -%}\n {%- for item in message.content if item.type == \"text\" -%}\n {{- item.text -}}\n {%- endfor -%}\n {%- endif -%}\n\n {%- elif message.role == \"assistant\" -%}\n {%- set thinking_tag = \"\" -%}\n {%- if enable_thinking is defined -%}\n {%- set thinking_tag = \"</think>\" if not enable_thinking else \"<think>\" -%}\n {%- endif -%}\n {{- '<|Assistant|>' + thinking_tag -}}\n\n {%- if message.content is string -%}\n {{- message.content -}}\n {%- elif message.content is iterable and message.content is not string -%}\n {%- for item in message.content if item.type == \"text\" -%}\n {{- item.text -}}\n {%- endfor -%}\n {%- endif -%}\n\n {{- '<|end_of_sentence|>' -}}\n {%- endif -%}\n{%- endfor -%}\n\n{%- if add_generation_prompt -%}\n {{- '<|Assistant|>' -}}\n {%- if enable_thinking is defined -%}\n {{- \"</think>\" if not enable_thinking else \"<think>\" -}}\n {%- endif -%}\n{%- endif -%}"
1081
+ }
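
Beyond the token list, the notable part of tokenizer_config.json is the chat_template: it emits one <image> marker per image item in user messages, opens the conversation with <|begin_of_sentence|> plus any system prompt, wraps turns in <|User|> / <|Assistant|>, and, when enable_thinking is defined, appends <think> or </think> to assistant turns and to the generation prompt. A short rendering sketch, assuming a recent transformers version that forwards extra keyword arguments into the template, with the same placeholder checkpoint path:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./ckpt")  # placeholder path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]

# enable_thinking is picked up by the template and switches the generation
# prompt between <think> and </think>.
text = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=True
)
print(text)
# roughly: <image><|begin_of_sentence|>You are a helpful assistant.
#          <|User|>Describe this picture.<|Assistant|><think>
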
utils.py ADDED
@@ -0,0 +1,144 @@
1
+ from typing import (Callable, Dict, Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, Union, overload)
2
+ import torch
3
+ from torch.func import functional_call
4
+
5
+ @overload
6
+ def flatten_bn(x: torch.Tensor) -> torch.Tensor:
7
+ ...
8
+
9
+
10
+ @overload
11
+ def flatten_bn(x: List[torch.Tensor]) -> List[torch.Tensor]:
12
+ ...
13
+
14
+
15
+ @overload
16
+ def flatten_bn(
17
+ x: Union[List[torch.Tensor], torch.Tensor],
18
+ *,
19
+ concat: Literal[True],
20
+ ) -> torch.Tensor:
21
+ ...
22
+
23
+
24
+ @overload
25
+ def flatten_bn(
26
+ x: Union[List[torch.Tensor], torch.Tensor],
27
+ *,
28
+ concat: bool = False,
29
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
30
+ ...
31
+
32
+
33
+ def flatten_bn(
34
+ x: Union[List[torch.Tensor], torch.Tensor],
35
+ *,
36
+ concat: bool = False,
37
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
38
+ """
39
+ Flatten the ``B`` and ``N`` dimensions of batched multimodal inputs.
40
+
41
+ The input tensor should have shape ``(B, N, ...)``.
42
+ """
43
+ if isinstance(x, torch.Tensor):
44
+ return x.flatten(0, 1)
45
+
46
+ if concat:
47
+ return torch.cat(x)
48
+
49
+ return [x_n for x_b in x for x_n in x_b]
50
+
51
+ def _flatten_embeddings(embeddings: torch.Tensor) -> torch.Tensor:
52
+ """
53
+ Recursively flattens and concatenates NestedTensors on all but the last
54
+ dimension.
55
+ """
56
+
57
+ if isinstance(embeddings, torch.Tensor):
58
+ # Flatten all but the last dimension.
59
+ return embeddings.flatten(0, -2)
60
+
61
+ return torch.cat(tuple(_flatten_embeddings(t) for t in embeddings))
62
+
63
+ def _embedding_count_expression(embeddings: torch.Tensor) -> str:
64
+ """
65
+ Constructs a debugging representation of the number of embeddings in the
66
+ Tensors.
67
+ """
68
+
69
+ if isinstance(embeddings, torch.Tensor):
70
+ return " x ".join([str(dim) for dim in embeddings.shape[:-1]])
71
+
72
+ return " + ".join(
73
+ _embedding_count_expression(inner) for inner in embeddings)
74
+
75
+ def _merge_multimodal_embeddings(
76
+ inputs_embeds: torch.Tensor,
77
+ is_multimodal: torch.Tensor,
78
+ multimodal_embeddings: torch.Tensor,
79
+ ) -> torch.Tensor:
80
+ """
81
+ Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
82
+ positions in ``inputs_embeds`` selected by the boolean ``is_multimodal``
83
+ mask.
84
+
85
+ Note:
86
+ This updates ``inputs_embeds`` in place.
87
+ """
88
+ num_expected_tokens = is_multimodal.sum().item()
89
+ assert isinstance(num_expected_tokens, int)
90
+ # [total_patches, text_config.hidden_size]
91
+ flattened = _flatten_embeddings(multimodal_embeddings)
92
+ if flattened.shape[0] != num_expected_tokens:
93
+ expr = _embedding_count_expression(multimodal_embeddings)
94
+ raise ValueError(
95
+ f"Attempted to assign {expr} = {flattened.shape[0]} "
96
+ f"multimodal tokens to {num_expected_tokens} placeholders")
97
+
98
+ inputs_embeds[is_multimodal] = flattened
99
+ return inputs_embeds
100
+
101
+ def merge_multimodal_embeddings(
102
+ input_ids: torch.Tensor,
103
+ inputs_embeds: torch.Tensor,
104
+ multimodal_embeddings: torch.Tensor,
105
+ placeholder_token_id: Union[int, List[int]],
106
+ ) -> torch.Tensor:
107
+ """
108
+ Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
109
+ positions in ``inputs_embeds`` corresponding to placeholder tokens in
110
+ ``input_ids``.
111
+
112
+ ``placeholder_token_id`` can be a list of token ids (e.g., token ids
113
+ of img_start, img_break, and img_end tokens) when needed: This means
114
+ the order of these tokens in the ``input_ids`` MUST MATCH the order of
115
+ their embeddings in ``multimodal_embeddings`` since we need to
116
+ slice-merge instead of individually scattering.
117
+
118
+ For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where
119
+ - T is text token
120
+ - S is image start token
121
+ - I is image embedding token
122
+ - B is image break token
123
+ - E is image end token.
124
+
125
+ Then the image embeddings (that correspond to I's) from vision encoder
126
+ must be padded with embeddings of S, B, and E in the same order of
127
+ input_ids for a correct embedding merge.
128
+
129
+ Note:
130
+ This updates ``inputs_embeds`` in place.
131
+ """
132
+ if isinstance(placeholder_token_id, list):
133
+ placeholder_token_id = torch.tensor(placeholder_token_id,
134
+ device=input_ids.device)
135
+ return _merge_multimodal_embeddings(
136
+ inputs_embeds,
137
+ torch.isin(input_ids, placeholder_token_id),
138
+ multimodal_embeddings,
139
+ )
140
+ return _merge_multimodal_embeddings(
141
+ inputs_embeds,
142
+ (input_ids == placeholder_token_id),
143
+ multimodal_embeddings,
144
+ )
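
utils.py is the glue between the vision encoder outputs and the language model inputs: flatten_bn collapses the batch and per-sample image axes, and merge_multimodal_embeddings scatters image embeddings into the placeholder positions of the text embedding matrix, raising a descriptive error on any count mismatch. A self-contained toy run, assuming this utils.py is importable and using a made-up placeholder token id:

import torch
from utils import flatten_bn, merge_multimodal_embeddings

IMG = 7  # hypothetical placeholder token id

# Two samples, three image patches each, hidden size 4 -> (B=2, N=3, 4).
patch_embeds = torch.arange(2 * 3 * 4, dtype=torch.float).reshape(2, 3, 4)
print(flatten_bn(patch_embeds).shape)  # torch.Size([6, 4])

# A toy sequence with three IMG placeholders among text tokens.
input_ids = torch.tensor([1, IMG, IMG, IMG, 2, 3])
inputs_embeds = torch.zeros(6, 4)
image_embeds = torch.ones(3, 4)  # exactly one row per placeholder

merged = merge_multimodal_embeddings(input_ids, inputs_embeds, image_embeds, IMG)
print(merged[1:4])  # the IMG rows now hold the image embeddings
# Passing a wrong number of image rows raises the
# "Attempted to assign ... multimodal tokens to ... placeholders" ValueError.
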