Yuan-lab committed
Commit 2e05013 · verified · 1 Parent(s): 1d0d672

Upload 10 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
Param_Calculation.md ADDED
@@ -0,0 +1,24 @@
+ # Documentation for Parameter Calculation
+
+ Thank you for your interest in our Yuan3.0 model!
+ Our community is open to everyone and welcomes comments of all kinds.
+ This document explains the parameter-calculation script.
+
+ ## Setup for Parameter Calculation
+
+ ### 1. Download the Yuan3.0 Model Locally
+
+ ### 2. Modify `MODEL_PATH` to Your Local Download Path
+
+ ```bash
+ vim Param_Calculation.py
+ # modify the following line
+ MODEL_PATH = "/path/to/Yuan3.0-Model"
+ ```
+
+ ### 3. Run the Parameter Calculation Script
+
+ ```bash
+ python Param_Calculation.py
+ ```
+
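
For reference, the script prints a summary in the format below. This is only a sketch of the output shape: the counts shown are placeholders, and the actual values depend on the checkpoint you downloaded.

```
🚀 Loading model from local path: /path/to/Yuan3.0-Model

==============================
--Yuan3.0 Model Parameter--
==============================
Vit Model Parameters: <vision-tower parameter count>
Yuan Model Parameters: <language-model parameter count>
Total Parameters: <total parameter count>
==============================
```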
Param_Calculation.py ADDED
@@ -0,0 +1,51 @@
+ import os
+ import sys
+ import torch
+
+ # ================= Configuration =================
+ # Full local path to the Yuan3.0 model; replace the path below with yours
+ MODEL_PATH = "/path/to/Yuan3.0-Model"
+
+ # Put the model directory at the front of the Python module search path
+ if MODEL_PATH not in sys.path:
+     sys.path.insert(0, MODEL_PATH)
+
+ # Force offline mode so the Hugging Face libraries never hit the network
+ # or revalidate against a remote cache
+ os.environ["TRANSFORMERS_OFFLINE"] = "1"
+ os.environ["HF_DATASETS_OFFLINE"] = "1"
+ os.environ["HF_EVALUATE_OFFLINE"] = "1"
+
+ from transformers import AutoModel, AutoTokenizer, AutoConfig
+
+ print(f"🚀 Loading model from local path: {MODEL_PATH}")
+
+ # Load the model
+ model = AutoModel.from_pretrained(
+     MODEL_PATH,
+     torch_dtype=torch.bfloat16,
+     low_cpu_mem_usage=True,
+     use_flash_attn=False,
+     device_map="cpu",
+     local_files_only=True,
+     trust_remote_code=True,
+ )
+
+ print("\n" + "="*30)
+ print("--Yuan3.0 Model Parameter--")
+ print("="*30)
+
+ # Count parameters, splitting the vision tower from the language model
+ vit_params = 0
+ yuan_params = 0
+ total_params = model.num_parameters()
+ for n, p in model.named_parameters():
+     if 'vision_model' in n:
+         vit_params += p.numel()
+     else:
+         yuan_params += p.numel()
+
+ print(f"Vit Model Parameters: {vit_params:,}")
+ print(f"Yuan Model Parameters: {yuan_params:,}")
+ print(f"Total Parameters: {total_params:,}")
+ print("="*30)
+
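
If a finer-grained breakdown than the vision/LLM split is useful, the same `named_parameters()` loop generalizes to grouping by top-level submodule. A minimal sketch, assuming the `model` object loaded by the script above; the submodule names printed are whatever the checkpoint actually defines:

```python
from collections import defaultdict

# Aggregate parameter counts by the first component of each parameter name,
# e.g. 'vision_model.encoder.layers.0...' is counted under 'vision_model'.
counts = defaultdict(int)
for name, p in model.named_parameters():
    counts[name.split('.')[0]] += p.numel()

for module_name, n in sorted(counts.items(), key=lambda kv: -kv[1]):
    print(f"{module_name}: {n:,}")
```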
modeling_intern_vit.py ADDED
@@ -0,0 +1,364 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2023 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from einops import rearrange
+ from timm.models.layers import DropPath
+ from torch import nn
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import (BaseModelOutput,
+                                            BaseModelOutputWithPooling)
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+
+ try:
+     from flash_attention import FlashAttention
+     has_flash_attn = True
+ except ImportError:
+     print('internvit FlashAttention is not installed.')
+     has_flash_attn = False
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ try:
+     from apex.normalization import FusedRMSNorm
+
+     InternRMSNorm = FusedRMSNorm  # noqa
+
+     logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
+ except ImportError:
+     # using the normal InternRMSNorm
+     pass
+ except Exception:
+     logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
+     pass
+
+
+ NORM2FN = {
+     'rms_norm': InternRMSNorm,
+     'layer_norm': nn.LayerNorm,
+ }
+
+
+ class InternVisionEmbeddings(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.image_size = config.image_size
+         self.patch_size = config.patch_size
+
+         self.class_embedding = nn.Parameter(
+             torch.randn(1, 1, self.embed_dim),
+         )
+
+         self.patch_embedding = nn.Conv2d(
+             in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+         )
+
+         self.num_patches = (self.image_size // self.patch_size) ** 2
+         self.num_positions = self.num_patches + 1
+
+         self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+     def _get_pos_embed(self, pos_embed, H, W):
+         target_dtype = pos_embed.dtype
+         pos_embed = pos_embed.float().reshape(
+             1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
+         pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
+             reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
+         return pos_embed
+
+     def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+         target_dtype = self.patch_embedding.weight.dtype
+         patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, embed_dim, height, width]
+         batch_size, _, height, width = patch_embeds.shape
+         patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+         class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+         embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+         position_embedding = torch.cat([
+             self.position_embedding[:, :1, :],
+             self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
+         ], dim=1)
+         embeddings = embeddings + position_embedding.to(target_dtype)
+         return embeddings
+
+
+ class InternAttention(nn.Module):
+     """Multi-headed attention from the 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.embed_dim = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.use_flash_attn = config.use_flash_attn and has_flash_attn
+         if config.use_flash_attn and not has_flash_attn:
+             print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
+         self.head_dim = self.embed_dim // self.num_heads
+         if self.head_dim * self.num_heads != self.embed_dim:
+             raise ValueError(
+                 f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
+                 f' {self.num_heads}).'
+             )
+
+         self.scale = self.head_dim ** -0.5
+         self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
+         self.attn_drop = nn.Dropout(config.attention_dropout)
+         self.proj_drop = nn.Dropout(config.dropout)
+
+         self.qk_normalization = config.qk_normalization
+
+         if self.qk_normalization:
+             self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+             self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+         if self.use_flash_attn:
+             self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
+         self.proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+     def _naive_attn(self, x):
+         B, N, C = x.shape
+         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
+
+         if self.qk_normalization:
+             B_, H_, N_, D_ = q.shape
+             q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+             k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
+
+         attn = ((q * self.scale) @ k.transpose(-2, -1))
+         attn = attn.softmax(dim=-1)
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+     def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
+         qkv = self.qkv(x)
+         qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
+
+         if self.qk_normalization:
+             q, k, v = qkv.unbind(2)
+             q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
+             k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
+             qkv = torch.stack([q, k, v], dim=2)
+
+         context, _ = self.inner_attn(
+             qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
+         )
+         outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
+         outs = self.proj_drop(outs)
+         return outs
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
+         return x
+
+
+ class InternMLP(nn.Module):
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         self.act = ACT2FN[config.hidden_act]
+         self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+         self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         hidden_states = self.fc1(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.fc2(hidden_states)
+         return hidden_states
+
+
+ class InternVisionEncoderLayer(nn.Module):
+     def __init__(self, config: InternVisionConfig, drop_path_rate: float):
+         super().__init__()
+         self.embed_dim = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.norm_type = config.norm_type
+
+         self.attn = InternAttention(config)
+         self.mlp = InternMLP(config)
+         self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
+         self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
+
+         self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
+         self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+         self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+     ) -> torch.Tensor:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+         """
+         hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
+
+         hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
+
+         return hidden_states
+
+
+ class InternVisionEncoder(nn.Module):
+     """
+     Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+     [`InternEncoderLayer`].
+
+     Args:
+         config (`InternConfig`):
+             The corresponding vision configuration for the `InternEncoder`.
+     """
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__()
+         self.config = config
+         # stochastic depth decay rule
+         dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
+         self.layers = nn.ModuleList([
+             InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
+         self.gradient_checkpointing = True
+
+     def forward(
+         self,
+         inputs_embeds,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutput]:
+         r"""
+         Args:
+             inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                 Embedded representation of the inputs. Should be float, not int tokens.
+             output_hidden_states (`bool`, *optional*):
+                 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                 for more detail.
+             return_dict (`bool`, *optional*):
+                 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         """
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         encoder_states = () if output_hidden_states else None
+         hidden_states = inputs_embeds
+
+         for idx, encoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 encoder_states = encoder_states + (hidden_states,)
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     encoder_layer,
+                     hidden_states)
+             else:
+                 layer_outputs = encoder_layer(
+                     hidden_states,
+                 )
+             hidden_states = layer_outputs
+
+         if output_hidden_states:
+             encoder_states = encoder_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, encoder_states] if v is not None)
+         return BaseModelOutput(
+             last_hidden_state=hidden_states, hidden_states=encoder_states
+         )
+
+
+ class InternVisionModel(PreTrainedModel):
+     main_input_name = 'pixel_values'
+     config_class = InternVisionConfig
+     _no_split_modules = ['InternVisionEncoderLayer']
+
+     def __init__(self, config: InternVisionConfig):
+         super().__init__(config)
+         self.config = config
+
+         self.embeddings = InternVisionEmbeddings(config)
+         self.encoder = InternVisionEncoder(config)
+
+     def resize_pos_embeddings(self, old_size, new_size, patch_size):
+         pos_emb = self.embeddings.position_embedding
+         _, num_positions, embed_dim = pos_emb.shape
+         cls_emb = pos_emb[:, :1, :]
+         pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
+         pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
+         pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
+         pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
+         self.embeddings.position_embedding = nn.Parameter(pos_emb)
+         self.embeddings.image_size = new_size
+         logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def forward(
+         self,
+         pixel_values: Optional[torch.FloatTensor] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         pixel_embeds: Optional[torch.FloatTensor] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPooling]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None and pixel_embeds is None:
+             raise ValueError('You have to specify pixel_values or pixel_embeds')
+
+         if pixel_embeds is not None:
+             hidden_states = pixel_embeds
+         else:
+             if len(pixel_values.shape) == 4:
+                 hidden_states = self.embeddings(pixel_values)
+             else:
+                 raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
+         encoder_outputs = self.encoder(
+             inputs_embeds=hidden_states,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         last_hidden_state = encoder_outputs[0]  # works for both tuple and ModelOutput returns
+         pooled_output = last_hidden_state[:, 0, :]
+
+         if not return_dict:
+             return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=last_hidden_state,
+             pooler_output=pooled_output,
+             hidden_states=encoder_outputs.hidden_states,
+             attentions=encoder_outputs.attentions,
+         )
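
As a quick sanity check on the `InternRMSNorm` math above (a standalone sketch, not part of this commit): the layer divides by the root-mean-square over the last dimension, so before the learned `weight` is applied, the normalized activations have RMS close to 1.

```python
import torch

# Reproduce InternRMSNorm's core computation on a random tensor
# (eps matches the class default of 1e-6).
x = torch.randn(2, 5, 64)
variance = x.pow(2).mean(-1, keepdim=True)
y = x * torch.rsqrt(variance + 1e-6)

# RMS over the last dimension is ~1.0 for every position.
print(y.pow(2).mean(-1).sqrt())
```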
modeling_yuanlm2.py ADDED
@@ -0,0 +1,1502 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ PyTorch Yuan model."""
+ import math
+ from typing import List, Optional, Tuple, Union
+ import torch.nn.functional as F
+ import torch
+ import torch.utils.checkpoint
+ from torch import einsum, nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+ from .configuration_yuan import YuanConfig
+ from einops import rearrange
+ from transformer_engine.pytorch import RMSNorm
+ import pdb
+ import copy
+ try:
+     import grouped_gemm as gg
+ except ImportError:
+     gg = None
+ try:
+     from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
+     from flash_attn import flash_attn_func
+ except ImportError:
+     flash_attn_unpadded_func = None
+
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "YuanConfig"
+
+
+ class YuanRotaryEmbedding(nn.Module):
+     def __init__(self, dim, base=10000, dtype=torch.float32, rotary_interleaved=False, seq_len_interpolation_factor=None):
+         super().__init__()
+         self.base = base
+         self.dim = dim
+         self.rotary_interleaved = rotary_interleaved
+         self.seq_len_interpolation_factor = seq_len_interpolation_factor
+
+     def get_rotary_seq_len(
+         self,
+         inference_param=None,
+         transformer_input: torch.Tensor = None,
+         transformer_config=None,
+     ):
+         if inference_param is not None:
+             rotary_seq_len = inference_param.max_sequence_length
+         else:
+             rotary_seq_len = transformer_input.size(0)
+             if transformer_config.sequence_parallel:
+                 rotary_seq_len *= transformer_config.tensor_model_parallel_size
+
+         return rotary_seq_len
+
+     def forward(self, max_seq_len, offset=0):
+         """Forward pass of RoPE embedding.
+
+         Args:
+             max_seq_len (int): Maximum size of sequence
+             offset (int, optional): Position offset added to the sequence indices. Defaults to 0.
+
+         Returns:
+             Tensor: Embeddings after applying RoPE.
+         """
+         inv_freq = (1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.float32, device=torch.cuda.current_device()) / self.dim))).to(torch.float32)
+
+         #max_seq_len_int = max_seq_len.item() if max_seq_len.numel() == 1 else max_seq_len.max().item()
+         seq = (
+             torch.arange(max_seq_len, device=inv_freq.device, dtype=inv_freq.dtype)
+             + offset
+         )
+
+         if self.seq_len_interpolation_factor is not None:
+             seq *= 1 / self.seq_len_interpolation_factor
+
+         freqs = torch.outer(seq, inv_freq)
+         # first part even vector components, second part odd vector components,
+         # 2 * dim in dimension size
+         if not self.rotary_interleaved:
+             emb = torch.cat((freqs, freqs), dim=-1)
+         else:
+             emb = torch.stack((freqs.view(-1, 1), freqs.view(-1, 1)), dim=-1).view(
+                 freqs.shape[0], -1
+             )
+         # emb [seq_length, .., dim]
+         emb = emb[:, None, None, :]
+         #emb = emb[:, None, :]
+         return emb
+
+
+ def _rotate_half(x, rotary_interleaved):
+     """huggingface version
+     change sign so the last dimension becomes [-odd, +even]
+
+     x1, x2 = torch.chunk(x, 2, dim=-1)
+     return torch.cat((-x2, x1), dim=-1)
+     """
+     if not rotary_interleaved:
+         x1, x2 = torch.chunk(x, 2, dim=-1)
+         return torch.cat((-x2, x1), dim=-1)
+     else:
+         x1 = x[:, :, :, ::2]
+         x2 = x[:, :, :, 1::2]
+         x_new = torch.stack((-x2, x1), dim=-1)
+         return x_new.view(x_new.shape[0], x_new.shape[1], x_new.shape[2], -1)
+
+
+ def apply_rotary_pos_emb(t, freqs, position_ids, rotary_interleaved=False):
+     rot_dim = freqs.shape[-1]
+     #if position_ids.shape[1] > 1:
+     freqs = freqs[position_ids]
+     freqs = freqs.view(t.shape[1], freqs.shape[1], freqs.shape[2], freqs.shape[4]).transpose(0, 1)
+     # ideally t_pass is empty so rotary pos embedding is applied to all of tensor t
+     t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
+
+     # first part is the cosine component
+     # second part is the sine component, need to change signs with the _rotate_half method
+     cos_ = torch.cos(freqs).to(t.dtype)
+     sin_ = torch.sin(freqs).to(t.dtype)
+
+     t = (t * cos_) + (_rotate_half(t, rotary_interleaved) * sin_)
+     return torch.cat((t, t_pass), dim=-1)
+
+
+ class LocalizedFiltering(torch.nn.Module):
+     """
+     Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
+     variable names and moving away from the stateful representation of incremental decoding state. See
+     "https://arxiv.org/abs/2209.10655" for more details.
+     """
+
+     def __init__(self, hidden_size, lf_conv2d_group, lf_conv2d_num_pad, use_lfa_bias):
+         super().__init__()
+
+         self.embed_dim = hidden_size
+         self.lf_conv2d_group = lf_conv2d_group
+         self.lf_conv2d_num_pad = lf_conv2d_num_pad
+         self.use_lfa_bias = use_lfa_bias
+         if self.lf_conv2d_num_pad == 1:
+             self.training = True
+         self.conv1 = torch.nn.Conv2d(self.embed_dim, self.embed_dim // 2, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group, bias=use_lfa_bias)
+         self.conv2 = torch.nn.Conv2d(self.embed_dim // 2, self.embed_dim, (2, 1), stride=(1, 1), padding=(self.lf_conv2d_num_pad, 0), groups=self.lf_conv2d_group, bias=use_lfa_bias)
+         self.output_layernorm = RMSNorm(self.embed_dim, eps=1e-6)
+
+     def _train_forward(self, inputs):
+         inputs = inputs.transpose(0, 1)
+         seq_len, bsz, embed_dim = inputs.size()
+         if embed_dim != self.embed_dim:
+             raise ValueError(
+                 f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
+             )
+         residual = inputs
+
+         inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
+         output1 = self.conv1(inputs)
+         output1 = output1[:, :, :seq_len, :]
+
+         output2 = self.conv2(output1)
+         output2 = output2[:, :, :seq_len, :].permute(2, 3, 0, 1).contiguous()
+         output2 = output2.view(seq_len, bsz, embed_dim)
+         assert output2.shape == residual.shape
+
+         torch.cuda.set_device(output2.device)
+         lf_output = self.output_layernorm(output2 + residual)
+         lf_output = lf_output.transpose(0, 1)
+         return lf_output
+
+     def _inference_forward(self, inputs, before_hidden_states):
+         if before_hidden_states is None:
+             residual = inputs
+             seq_len, bsz, embed_dim = inputs.size()
+
+             inputs = inputs.view(seq_len, 1, bsz, embed_dim).permute(2, 3, 0, 1)
+
+             pad_zero1 = torch.zeros(bsz, embed_dim, 1, 1).to(inputs)
+             inputs = torch.cat((pad_zero1, inputs), dim=2).contiguous()
+             output1 = self.conv1(inputs)
+
+             pad_zero2 = torch.zeros(bsz, embed_dim // 2, 1, 1).to(output1)
+             output1 = torch.cat((pad_zero2, output1), dim=2).contiguous()
+             output2 = self.conv2(output1)
+
+             output2 = output2.permute(2, 3, 0, 1).contiguous()
+             output2 = output2.view(seq_len, bsz, embed_dim)
+
+             assert output2.shape == residual.shape
+
+             lf_output = self.output_layernorm(output2 + residual)
+
+         else:
+             residual = inputs
+
+             seq_len, bsz, embed_dim = inputs.size()
+             seq_len_before, _, _ = before_hidden_states.size()
+
+             assert seq_len == 1 and seq_len_before == 2
+
+             inputs = torch.cat((before_hidden_states, inputs), dim=0)
+             inputs = inputs.view(3, 1, bsz, embed_dim).permute(2, 3, 0, 1)
+
+             output1 = self.conv1(inputs)
+             output2 = self.conv2(output1)
+             output2 = output2.view(1, bsz, embed_dim)
+
+             assert output2.shape == residual.shape
+
+             lf_output = self.output_layernorm(output2 + residual)
+
+         return lf_output
+
+     def forward(
+         self,
+         inputs,
+         before_hidden_states=None,
+     ) -> torch.Tensor:
+         # assert self.lf_conv2d_num_pad == 1
+         if self.training:
+             lf_output = self._train_forward(inputs)
+         else:
+             lf_output = self._inference_forward(inputs, before_hidden_states)
+
+         return lf_output
+
+
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
+ def _make_causal_mask(
+     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+ ):
+     """
+     Make causal mask used for bi-directional self-attention.
+     """
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+     """
+     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+     """
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+     inverted_mask = 1.0 - expanded_mask
+
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ class YuanRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         YuanRMSNorm is equivalent to LlamaRMSNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ # flash attn
+ class FlashSelfAttention(torch.nn.Module):
+     """Implement the scaled dot product attention with softmax.
+     Arguments
+     ---------
+         softmax_scale: The temperature to use for the softmax attention.
+                        (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
+         attention_dropout: The dropout rate to apply to the attention (default: 0.0)
+     """
+
+     def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
+                  device=None, dtype=None):
+         super().__init__()
+         assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
+                                                       'e.g., with pip install flash-attn')
+         assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
+         self.causal = causal
+         self.softmax_scale = softmax_scale
+         self.dropout_p = attention_dropout
+
+     def forward(self, q, k, v):
+         """Implements the multihead softmax attention.
+         Arguments
+         ---------
+             q, k, v: The tensors containing the query, key, and value. (S, B, H, D)
+         """
+         assert all(i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v))
+         assert all(i.is_cuda for i in (q, k, v))
+
+         batch_size, seqlen_q = q.shape[1], q.shape[0]
+         seqlen_k = k.shape[0]
+         q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
+         cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q.device)
+         if self.training:
+             # during training q, k, v always have the same seqlen
+             assert seqlen_k == seqlen_q
+             is_causal = self.causal
+             cu_seqlens_k = cu_seqlens_q
+             dropout_p = self.dropout_p
+         else:
+             # turn off FA causal mask after the first inference autoregressive iteration;
+             # only on the first autoregressive step do q, k, v have the same seqlen
+             is_causal = seqlen_q == seqlen_k
+             cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=q.device)
+             #cu_seqlens_q = [cu_seqlens_q[0], cu_seqlens_q[-1]]
+             #cu_seqlens_k = [cu_seqlens_k[0], cu_seqlens_k[-1]]
+             dropout_p = 0
+
+         output = flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, dropout_p, softmax_scale=self.softmax_scale, causal=is_causal)
+
+         output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
+         return output
+
+
+ class ParallelAttention_router(nn.Module):
+     def __init__(self, config, num_experts):
+         super(ParallelAttention_router, self).__init__()
+         layer_number = 0
+         self.layer_number = max(1, layer_number)
+
+         self.hidden_size = config.hidden_size
+         self.projection_size = num_experts
+
+         self.num_attention_router_heads = config.moe_config['num_attention_router_heads']
+         self.hidden_size_per_attention_head = config.max_position_embeddings // self.num_attention_router_heads
+         self.query_key_value = nn.Linear(self.hidden_size, self.projection_size * 3, bias=False)
+
+     def forward(self, hidden_states, attention_mask=None, enc_position_ids=None,
+                 encoder_output=None, inference_params=None,
+                 rotary_pos_emb=None):
+         is_first_step = False
+         before_hidden_states = None
+
+         #mixed_x_layer = torch.matmul(hidden_states, self.query_key_value)
+         mixed_x_layer = self.query_key_value(hidden_states)
+         (query_layer, key_layer, value_layer) = torch.split(mixed_x_layer, self.projection_size, -1)
+         b, s, z = query_layer.shape
+
+         # use fp32 router
+         query_layer = query_layer.float().view(b, s, z, 1)
+         key_layer = key_layer.float().view(b, s, z, 1)
+         value_layer = value_layer.float().view(b, s, z, 1)
+
+         attn_weights = torch.matmul(query_layer, key_layer.transpose(2, 3))
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+         attn_output = torch.matmul(attn_weights, value_layer)
+         router_output = attn_output.view(-1, z)
+         return router_output
+
+
+ class YuanExpertMLP(nn.Module):
+     def __init__(self, config):
+         super(YuanExpertMLP, self).__init__()
+         self.gated_linear_unit = config.moe_config['gated_linear_unit']
+         #self.ffn_hidden_size = config.moe_config['ffn_hidden_size']
+         self.ffn_hidden_size = config.ffn_hidden_size
+
+         if self.gated_linear_unit:
+             self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size * 2, bias=False)
+         else:
+             self.w1 = nn.Linear(config.hidden_size, self.ffn_hidden_size, bias=False)
+
+         self.act_fn = ACT2FN[config.hidden_act]
+         self.w2 = nn.Linear(self.ffn_hidden_size, config.hidden_size, bias=False)
+
+     def forward(self, x):
+         x = self.w1(x)
+         if self.gated_linear_unit:
+             x = torch.chunk(x, 2, dim=-1)
+             x = self.act_fn(x[0]) * x[1]
+         else:
+             x = self.act_fn(x)
+         x = self.w2(x)
+         return x
+
+
+ class YuanMLP(nn.Module):
+     def __init__(
+         self,
+         hidden_size: int,
+         intermediate_size: int,
+         hidden_act: str
+     ):
+         super().__init__()
+         self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+         self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+         self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+         self.act_fn = ACT2FN[hidden_act]
+
+     def forward(self, x):
+         return self.down_proj(self.gate_proj(x) * self.act_fn(self.up_proj(x)))
+
+
+ class YuanAttention(nn.Module):
+     """Localized Filtering-based Attention from the 'YUAN 2.0: A Large Language Model with Localized Filtering-based Attention' paper"""
+
+     def __init__(self, config: YuanConfig):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.lf_conv2d_group = config.lf_conv2d_group
+         self.lf_conv2d_num_pad = config.lf_conv2d_num_pad
+         self.use_lfa_bias = config.use_lfa_bias
+         # fall back gracefully when optional config fields are absent
+         self.attention_projection_size = getattr(config, 'attention_projection_size', None)
+
+         if self.attention_projection_size is None:
+             self.head_dim = self.hidden_size // self.num_heads
+         else:
+             self.head_dim = self.attention_projection_size // self.num_heads
+
+         self.max_position_embeddings = config.max_position_embeddings
+         self.causal_mask = config.causal_mask
+         self.attn_mask_type = config.attn_mask_type
+         self.softmax_scale = 1.0 / math.sqrt(self.head_dim)
+         self.use_flash_attention = config.use_flash_attention
+         self.use_shareqk = getattr(config, 'use_shareqk', False)
+         self.dropout = 0.0
+         self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+
+         if self.use_shareqk:
+             self.qk_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+             self.qk_weight = nn.Parameter(torch.Tensor(2, self.hidden_size))
+             self.qk_bias = nn.Parameter(torch.Tensor(2, self.hidden_size))
+         else:
+             self.lf_gate = LocalizedFiltering(self.hidden_size, self.lf_conv2d_group, self.lf_conv2d_num_pad, self.use_lfa_bias)
+             self.get_query_key = nn.Linear(self.hidden_size, 2 * self.attention_projection_size, bias=False)
+         self.core_attention = FlashSelfAttention(causal=True, attention_dropout=config.attn_dropout, softmax_scale=self.softmax_scale)
+
+     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         position_ids_k: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         q_len, bsz, _ = hidden_states.size()
+         hidden_states = hidden_states  #.to('cuda:1')
+         is_first_step = False
+         before_hidden_states = None
+         if use_cache:
+             if past_key_value is None:
+                 is_first_step = True
+                 if q_len > 1:
+                     inference_hidden_states_memory = hidden_states[-2:, :, :]
+                 else:
+                     inference_hidden_states_memory = torch.cat((torch.zeros_like(hidden_states), hidden_states), dim=0)
+             else:
+                 before_hidden_states = past_key_value[2]
+                 inference_hidden_states_memory = torch.cat((before_hidden_states[-1:, :, :], hidden_states), dim=0)
+         value_states = self.v_proj(hidden_states).view(q_len, bsz, self.num_heads, self.head_dim)
+         if self.use_shareqk:
+             qk_states = self.qk_proj(hidden_states).view(q_len, bsz, self.num_heads * self.head_dim)
+             query_key = qk_states.unsqueeze(2) * self.qk_weight + self.qk_bias
+             query_states, key_states = torch.unbind(query_key, dim=2)
+
+             query_states = query_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
+             key_states = key_states.view(q_len, bsz, self.num_heads, self.head_dim).transpose(1, 2)
+         else:
+             hidden_states = self.lf_gate(hidden_states, before_hidden_states)
+             mixed_qk_layer = self.get_query_key(hidden_states)
+             #mixed_qk_layer = torch.matmul(hidden_states, qk_tensor)
+             new_tensor_shape = mixed_qk_layer.size()[:-1] + (self.num_heads, 2 * self.head_dim)
+             mixed_qk_layer = mixed_qk_layer.view(*new_tensor_shape)
+             (query_states, key_states) = torch.split(mixed_qk_layer, self.head_dim, dim=-1)
+
+         kv_seq_len = key_states.shape[1]
+         if past_key_value is not None:
+             kv_seq_len += past_key_value[0].shape[1]
+
+         # duplicate the pos_emb for self attention
+         if rotary_pos_emb is not None:
+             if position_ids.shape[1] == 1:
+                 q_seq_start = position_ids[0, -1]
+                 #seq_start = past_key_value[0].shape[0]
+                 q_seq_end = q_seq_start + 1
+                 k_seq_end = q_seq_end
+             else:
+                 q_seq_start = 0
+                 q_seq_end = q_seq_start + key_states.shape[0]
+                 k_seq_end = q_seq_end
+
+             if not isinstance(rotary_pos_emb, tuple):
+                 rotary_pos_emb = (rotary_pos_emb,) * 2
+             q_pos_emb, k_pos_emb = rotary_pos_emb
+         if past_key_value is not None:
+             # reuse k, v, self_attention
+             key_states = torch.cat([past_key_value[0], key_states], dim=0)
+             value_states = torch.cat([past_key_value[1], value_states], dim=0)
+         past_key_value = (key_states, value_states, inference_hidden_states_memory) if use_cache else None
+         query_states = apply_rotary_pos_emb(query_states, q_pos_emb, position_ids)
+         key_states = apply_rotary_pos_emb(key_states, k_pos_emb, position_ids_k)
+
+         attn_weights = None
+         attn_output = self.core_attention(query_states, key_states, value_states)
+         q_len, bsz, _, _ = attn_output.shape
+         attn_output = attn_output.reshape(q_len, bsz, -1)
+
+         attn_output = self.o_proj(attn_output)
+
+         return attn_output, attn_weights, past_key_value
+
+
+ class MoEDroplessTokenDispatcher:
+     def __init__(self, num_experts: int, config: YuanConfig) -> None:
+         self.num_experts = num_experts
+         assert self.num_experts > 0, "Expected at least one expert"
+         self.router_topk = config.moe_config['moe_top_k']
+
+     def token_permutation(
+         self, hidden_states: torch.Tensor, max_prob: torch.Tensor, max_ind: torch.Tensor
+     ):
+         self.hidden_shape = hidden_states.shape
+         hidden_states = hidden_states.view(-1, self.hidden_shape[-1])
+
+         if self.router_topk > 1:
+             global_local_map = torch.ones_like(max_ind).bool()
+             local_indices = max_ind.masked_select(global_local_map)
+             local_probs = max_prob.masked_select(global_local_map)
+             global_local_map = global_local_map.nonzero()[:, 0]
+             global_local_map = global_local_map.view(-1, 1).expand(-1, hidden_states.shape[-1])
+             local_hidden_states = torch.gather(hidden_states, 0, global_local_map)
+
+         indices = torch.argsort(local_indices, dim=0)
+         tokens_per_expert = torch.histc(
+             local_indices,
+             bins=self.num_experts,
+             min=0,
+             max=self.num_experts - 1,
+         )
+         tokens_per_expert = tokens_per_expert.cpu().to(torch.long)
+
+         indices = indices.view(-1, 1).expand(-1, hidden_states.shape[-1])
+         permuted_local_hidden_states = torch.gather(local_hidden_states, 0, indices)
+         return (permuted_local_hidden_states, tokens_per_expert, local_probs, indices, global_local_map)
+
+     def token_unpermutation(
+         self,
+         hidden_states: torch.Tensor,
+         scores: torch.Tensor,
+         indices: torch.Tensor,
+         global_local_map: torch.Tensor = None,
+     ):
+         scores = scores.to(dtype=hidden_states.dtype)
+         unpermuted_local_hidden = torch.zeros_like(hidden_states)
+         assert indices.shape == hidden_states.shape, f'{indices.shape}, {hidden_states.shape}'
+         unpermuted_local_hidden = unpermuted_local_hidden.scatter(0, indices, hidden_states)
+
+         if self.router_topk > 1:
+             unpermuted_local_hidden = unpermuted_local_hidden * scores.view(-1, 1)
+         unpermuted_local_bias = None
+         output_total = unpermuted_local_hidden
+         output_bias_total = unpermuted_local_bias
+
+         if self.router_topk > 1:
+             global_num_tokens = self.hidden_shape[0] * self.hidden_shape[1]
+             global_hidden_shape = [global_num_tokens, hidden_states.shape[-1]]
+             unpermuted_global_hidden = torch.zeros(
+                 global_hidden_shape,
+                 dtype=hidden_states.dtype,
+                 device=hidden_states.device,
+             )
+             output_total = unpermuted_global_hidden.scatter_add(
+                 0, global_local_map, unpermuted_local_hidden
+             )
+
+         output_total = output_total.view(self.hidden_shape)
+
+         return output_total
+
+
+ class GroupedMLP(nn.Module):
+     """An efficient implementation of the Experts layer using CUTLASS GroupedGEMM.
+
+     This class is designed to execute multiple experts in parallel, thereby maximizing computational efficiency.
+     """
+
+     def __init__(self, num_experts: int, config: YuanConfig):
+         super().__init__()
+         self.num_experts = num_experts
+         self.config = config
+
+         def glu(x):
+             x = torch.chunk(x, 2, dim=-1)
+             return torch.nn.functional.silu(x[0]) * x[1]
+
+         self.activation_func = glu
+         self.ffn_hidden_size = config.ffn_hidden_size
+         fc1_output_size_per_partition = self.ffn_hidden_size * 2
+         fc2_input_size = self.ffn_hidden_size
+
+         self.w1 = nn.ModuleList([nn.Linear(self.config.hidden_size, self.ffn_hidden_size * 2, bias=False) for _ in range(num_experts)])
+         self.w2 = nn.ModuleList([nn.Linear(self.ffn_hidden_size, self.config.hidden_size, bias=False) for _ in range(num_experts)])
+
+     def forward(self, permuted_hidden_states, tokens_per_expert):
+         torch.cuda.set_device(permuted_hidden_states.device)
+         permuted_hidden_states = permuted_hidden_states  #.to('cuda:0')
+
+         fc2_outputs = []
+         start_idx = 0
+         for i in range(self.num_experts):
+             if tokens_per_expert[i] == 0:
+                 continue
+             end_idx = start_idx + tokens_per_expert[i]
+             # Use each expert's own Linear layers on its slice of tokens
+             fc1_output = self.w1[i](permuted_hidden_states[start_idx:end_idx])
+             intermediate_parallel = self.activation_func(fc1_output)
+             fc2_output = self.w2[i](intermediate_parallel)
+             fc2_outputs.append(fc2_output)
+             start_idx = end_idx
+         fc2_output = torch.cat(fc2_outputs, dim=0)
+         return fc2_output  #.to('cuda:1')
+
+
+ class YuanMoeLayer(nn.Module):
+     def __init__(self, config: YuanConfig, num_layer):
+         super().__init__()
+         self.config = config
+         if 'per_layer_experts_blocks' in config.moe_config:
+             assert config.moe_config['per_layer_experts_blocks'] is not None
+             self.num_experts = config.moe_config['per_layer_experts_blocks'][num_layer]
+         elif 'moe_num_experts' in config.moe_config:
+             assert config.moe_config['moe_num_experts'] is not None
+             self.num_experts = config.moe_config['moe_num_experts']
+         # self.num_experts = config.moe_config['moe_num_experts']
+         self.top_k = config.moe_config['moe_top_k']
+         self.norm_topk_prob = config.moe_config['norm_topk_prob']
+         self.hidden_size = config.hidden_size
+
+         expert_indices_offset = 0
+
+         if config.moe_config['router_type'] == 'attn_router':
+             self.router = ParallelAttention_router(config, self.num_experts)
+         else:
+             self.router = nn.Linear(config.hidden_size, self.num_experts, bias=False)
+
+         self.token_dispatcher = MoEDroplessTokenDispatcher(self.num_experts, config=self.config)
+         self.experts = GroupedMLP(self.num_experts, self.config)
+
+     def routing(self, logits: torch.Tensor) -> torch.Tensor:
+         top_logits, indices = torch.topk(logits, k=self.top_k, dim=1)
+         scores = torch.softmax(top_logits, dim=-1, dtype=torch.float32).type_as(logits)
+         return scores, indices
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         batch_size, sequence_length, hidden_dim = hidden_states.shape
+         logits = self.router(hidden_states)
+         scores, indices = self.routing(logits)
+         scores = scores.to(hidden_states.dtype)
+         (dispatched_input, tokens_per_expert, scores, indices, global_local_map) = self.token_dispatcher.token_permutation(hidden_states, scores, indices)
+         expert_output = self.experts(dispatched_input, tokens_per_expert)
+         output = self.token_dispatcher.token_unpermutation(expert_output, scores, indices, global_local_map)
+         return output
+
+
+ class YuanDecoderLayer(nn.Module):
+     def __init__(self, config: YuanConfig, num_layer):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = YuanAttention(config=config)
+         self.num_layer = num_layer
+
+         if hasattr(config, 'moe_config'):  # moe_config['num_moe_experts'] > 0 or hasattr(config.moe_config, 'per_layer_experts_blocks')
+             self.mlp = YuanMoeLayer(config, num_layer)
+         else:
+             self.mlp = YuanMLP(
+                 hidden_size=self.hidden_size,
+                 intermediate_size=config.intermediate_size,
+                 hidden_act=config.hidden_act,
+             )
+
+         self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         position_ids_k: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         rotary_pos_emb: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+             attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+                 `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+             output_attentions (`bool`, *optional*):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+             use_cache (`bool`, *optional*):
+                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                 (see `past_key_values`).
+             past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+         """
+         residual = hidden_states  #.to('cuda:1')
+         torch.cuda.set_device(hidden_states.device)
+         hidden_states = self.input_layernorm(hidden_states)  #.to('cuda:0')).to('cuda:1')
+
+         # Self Attention
+         hidden_states, self_attn_weights, present_key_value = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             position_ids_k=position_ids_k,
+             past_key_value=past_key_value,
+             rotary_pos_emb=rotary_pos_emb,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+         )
+
+         hidden_states = residual + hidden_states.permute(1, 0, 2)
+
+         # Fully Connected
+         residual = hidden_states  #.to('cuda:1')
+         torch.cuda.set_device(hidden_states.device)
+         hidden_states = self.post_attention_layernorm(hidden_states)  #.to('cuda:0')).to('cuda:1')
+         hidden_states = self.mlp(hidden_states)  #.to('cuda:1')
+         hidden_states = residual + hidden_states
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         if use_cache:
+             outputs += (present_key_value,)
+
+         return outputs
+
+
+ YUAN_START_DOCSTRING = r"""
+     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+     etc.)
+
+     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+     and behavior.
+
+     Parameters:
+         config ([`YuanConfig`]):
+             Model configuration class with all the parameters of the model. Initializing with a config file does not
+             load the weights associated with the model, only the configuration. Check out the
+             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+
+ @add_start_docstrings(
+     "The bare Yuan Model outputting raw hidden-states without any specific head on top.",
+     YUAN_START_DOCSTRING,
+ )
+ class YuanPreTrainedModel(PreTrainedModel):
+     config_class = YuanConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["YuanDecoderLayer"]
+     _skip_keys_device_placement = "past_key_values"
+     _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, YuanModel):
+             module.gradient_checkpointing = value
+
+
+ YUAN_INPUTS_DOCSTRING = r"""
+     Args:
+         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+             it.
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             [What are input IDs?](../glossary#input-ids)
+         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             [What are attention masks?](../glossary#attention-mask)
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+             `past_key_values`).
+
+             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+             and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+             information on the default strategy.
+
+             - 1 indicates the head is **not masked**,
+             - 0 indicates the head is **masked**.
+         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+             config.n_positions - 1]`.
+
+             [What are position IDs?](../glossary#position-ids)
+         past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+             Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+             `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+             `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+             blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+             If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+             `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+             model's internal embedding lookup matrix.
+         use_cache (`bool`, *optional*):
+             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+             `past_key_values`).
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+             tensors for more detail.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+ @add_start_docstrings(
+     "The bare Yuan Model outputting raw hidden-states without any specific head on top.",
+     YUAN_START_DOCSTRING,
+ )
+ class YuanModel(YuanPreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`YuanDecoderLayer`]
+
+     Args:
+         config: YuanConfig
+     """
+
+     def __init__(self, config: YuanConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
915
+ self.vocab_size = config.vocab_size
916
+
917
+ # TODO: control these via the config
918
+ self.eod_token = config.eod_token
919
+ self.reset_attention_mask = config.reset_attention_mask
920
+ self.reset_position_ids = config.reset_position_ids
921
+ self.max_position_embeddings = config.max_position_embeddings
922
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
923
+ self.layers = nn.ModuleList([YuanDecoderLayer(config, i) for i in range(config.num_hidden_layers)])
924
+ self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
925
+ self.gradient_checkpointing = False
926
+ # Initialize weights and apply final processing
927
+ self.post_init()
928
+
929
+ self.seq_length = config.max_position_embeddings
930
+ rotary_dim = config.hidden_size // config.num_attention_heads
931
+ if config.rotary_percent < 1.0:
932
+ rotary_dim = int(rotary_dim * config.rotary_percent)
933
+ self.rotary_pos_emb = YuanRotaryEmbedding(rotary_dim, base=config.rotary_base, dtype=config.torch_dtype)
934
+
935
+
936
+ def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
937
+ return self.embed_tokens(input_ids)
938
+
939
+ def set_input_embeddings(self, value):
940
+ self.embed_tokens = value
941
+
942
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
943
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
944
+ # create causal mask
945
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
946
+ combined_attention_mask = None
947
+ if input_shape[-1] > 1:
948
+ combined_attention_mask = _make_causal_mask(
949
+ input_shape,
950
+ inputs_embeds.dtype,
951
+ device=inputs_embeds.device,
952
+ past_key_values_length=past_key_values_length,
953
+ )
954
+
955
+ if attention_mask is not None:
956
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
957
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
958
+ inputs_embeds.device
959
+ )
960
+ combined_attention_mask = (
961
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
962
+ )
963
+
964
+ return combined_attention_mask
965
+
966
+ def _prepare_decoder_attention_mask_training(self, input_id, inputs_embeds, eod_token, reset_mask_flag ,reset_attention_mask=True, reset_position_ids=True):
967
+
968
+ micro_batch_size, seq_length = input_id.size()
969
+
970
+ attention_mask = torch.tril(torch.ones(
971
+ (micro_batch_size, seq_length, seq_length), device=inputs_embeds.device)).view(
972
+ micro_batch_size, 1, seq_length, seq_length)
973
+
974
+ position_ids = torch.arange(seq_length, dtype=torch.long,
975
+ device=inputs_embeds.device)
976
+ position_ids = position_ids.unsqueeze(0).expand_as(input_id)
977
+
978
+ if reset_position_ids:
979
+ position_ids = position_ids.clone()
980
+
981
+ if reset_position_ids or reset_attention_mask:
982
+ # Loop through the batches:
983
+ for b in range(micro_batch_size):
984
+
985
+ # Find indices where the EOD token occurs.
986
+ eod_index = position_ids[b, input_id[b] == eod_token]
987
+
988
+ # Detach indices from positions if going to modify positions.
989
+ if reset_position_ids:
990
+ eod_index = eod_index.clone()
991
+ # Loop through EOD indices:
992
+ prev_index = 0
993
+ for j in range(eod_index.size()[0]):
994
+ i = eod_index[j]
995
+ # Mask attention loss.
996
+ if reset_attention_mask:
997
+ attention_mask[b, 0, (i + 1):, :(i + 1)] = 0
998
+ # Reset positions.
999
+ if reset_position_ids:
1000
+ position_ids[b, (i + 1):] -= (i + 1 - prev_index)
1001
+ prev_index = i + 1
1002
+
1003
+ inverted_mask = 1 - attention_mask
1004
+ output_attn_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min)
1005
+ if reset_mask_flag:
1006
+ output_attn_mask = output_attn_mask[:,:,-1:,:]
1007
+ return output_attn_mask, position_ids
1008
+
1009
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1010
+ def forward(
1011
+ self,
1012
+ input_ids: torch.LongTensor = None,
1013
+ attention_mask: Optional[torch.Tensor] = None,
1014
+ position_ids: Optional[torch.LongTensor] = None,
1015
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1016
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1017
+ use_cache: Optional[bool] = None,
1018
+ output_attentions: Optional[bool] = None,
1019
+ output_hidden_states: Optional[bool] = None,
1020
+ output_router_logits: Optional[bool] = None,
1021
+ return_dict: Optional[bool] = None,
1022
+ ) -> Union[Tuple, BaseModelOutputWithPast, torch.Tensor]:
1023
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1024
+ output_router_logits = (
1025
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1026
+ )
1027
+ output_hidden_states = (
1028
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1029
+ )
1030
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+ input_ids1 = copy.deepcopy(input_ids)
1033
+ reset_mask_flag = False
1034
+ if past_key_values:
1035
+ # with a KV cache present, only the newest token needs processing
1036
+ input_ids = input_ids[:,-1:]
1037
+ if use_cache:
1038
+ reset_mask_flag = True
1039
+ # retrieve input_ids and inputs_embeds
1040
+ if input_ids is not None and inputs_embeds is not None:
1041
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1042
+ elif input_ids is not None:
1043
+ # use the provided token ids as-is
1044
+ batch_size, seq_length = input_ids.shape
1045
+ elif inputs_embeds is not None:
1046
+ inputs_embeds = inputs_embeds.transpose(0,1)
1047
+ batch_size, seq_length, _ = inputs_embeds.shape
1048
+ else:
1049
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1050
+
1051
+ seq_length_with_past = seq_length
1052
+ past_key_values_length = 0
1053
+
1054
+ if past_key_values is not None:
1055
+ past_key_values_length = past_key_values[0][0].shape[0]
1056
+ seq_length_with_past = seq_length_with_past + past_key_values_length
1057
+
1058
+ # modify to reset position ids
1059
+ if past_key_values is not None:
1060
+ pos_start = position_ids[:,-1]+1
1061
+ pos_end = pos_start+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
1062
+ position_ids_k = torch.arange(pos_start.item(), pos_end.item()).to(position_ids.device)
1063
+ position_ids_k = position_ids_k.unsqueeze(0)
1064
+ position_ids_k = torch.cat((position_ids, position_ids_k), dim=1)
1065
+ position_ids = position_ids[:,-1]+past_key_values[0][0].shape[0]-position_ids.shape[1]+1
1066
+ position_ids = position_ids.unsqueeze(0)
1067
+ else:
1068
+ position_ids_k = position_ids
1069
+
1070
+ if position_ids is None:
1071
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1072
+ position_ids = torch.arange(
1073
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1074
+ )
1075
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1076
+ # position ids were provided explicitly (or rebuilt above for cached decoding)
1077
+
1078
+
1079
+ if inputs_embeds is None:
1080
+ inputs_embeds = self.embed_tokens(input_ids).transpose(0,1)
1081
+
1082
+ if self.training or self.reset_position_ids:
1083
+ attention_mask, _ = self._prepare_decoder_attention_mask_training(input_ids1, inputs_embeds, self.eod_token, reset_mask_flag, self.reset_attention_mask, self.reset_position_ids)
1084
+ else:
1085
+ if attention_mask is None:
1086
+ attention_mask = torch.ones(
1087
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
1088
+ )
1089
+ attention_mask = self._prepare_decoder_attention_mask(
1090
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1091
+ )
1092
+
1093
+ # precompute rotary embeddings once for the full context window
1094
+ rotary_pos_emb = self.rotary_pos_emb(self.max_position_embeddings)
1095
+
1096
+ hidden_states = inputs_embeds
1097
+ if self.gradient_checkpointing and self.training:
1098
+ if use_cache:
1099
+ logger.warning_once(
1100
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1101
+ )
1102
+ use_cache = False
1103
+
1104
+ # decoder layers
1105
+ all_hidden_states = () if output_hidden_states else None
1106
+ all_self_attns = () if output_attentions else None
1107
+ next_decoder_cache = () if use_cache else None
1108
+ position_ids = position_ids.cpu()
1109
+ position_ids_k = position_ids_k.cpu()
1110
+ for idx, decoder_layer in enumerate(self.layers):
1111
+ if output_hidden_states:
1112
+ all_hidden_states += (hidden_states,)
1113
+
1114
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1115
+
1116
+ if self.gradient_checkpointing and self.training:
1117
+ def create_custom_forward(module):
1118
+ def custom_forward(*inputs):
1119
+ # None for past_key_value
1120
+ return module(*inputs, output_attentions, None)
1121
+
1122
+ return custom_forward
1123
+
1124
+ layer_outputs = torch.utils.checkpoint.checkpoint(
1125
+ create_custom_forward(decoder_layer),
1126
+ hidden_states,
1127
+ attention_mask,
1128
+ position_ids,
1129
+ None,
1130
+ )
1131
+ else:
1132
+ layer_outputs = decoder_layer(
1133
+ hidden_states,
1134
+ attention_mask=attention_mask,
1135
+ position_ids=position_ids,
1136
+ position_ids_k=position_ids_k,
1137
+ past_key_value=past_key_value,
1138
+ rotary_pos_emb=rotary_pos_emb,
1139
+ output_attentions=output_attentions,
1140
+ use_cache=use_cache,
1141
+ )
1142
+ hidden_states = layer_outputs[0]
1143
+
1144
+ if use_cache:
1145
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
1146
+
1147
+ if output_attentions:
1148
+ all_self_attns += (layer_outputs[1],)
1149
+ # the final norm runs on whichever device holds the hidden states
1150
+ torch.cuda.set_device(hidden_states.device)
1151
+ hidden_states = self.norm(hidden_states)
1152
+ # add hidden states from the last decoder layer
1153
+ if output_hidden_states:
1154
+ all_hidden_states += (hidden_states,)
1155
+ next_cache = next_decoder_cache if use_cache else None
1156
+ if not return_dict:
1157
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1158
+ return BaseModelOutputWithPast(
1159
+ last_hidden_state=hidden_states,
1160
+ past_key_values=next_cache,
1161
+ hidden_states=all_hidden_states,
1162
+ attentions=all_self_attns,
1163
+ )
1164
+
1165
+
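A note on `_prepare_decoder_attention_mask_training` above: it packs several documents into one row and, at every `<eod>` token, blocks attention across the boundary and restarts the position ids. The minimal sketch below reproduces that arithmetic on a toy packed sequence; the id `eod_token = 2` is an illustrative placeholder, not the model's real vocabulary id.

```python
import torch

# Toy packed row: two documents separated by an <eod> token (id 2 here).
eod_token = 2
input_ids = torch.tensor([[5, 6, 2, 7, 8, 9]])
bsz, seq_len = input_ids.shape

# Start from a plain causal mask and monotone position ids.
mask = torch.tril(torch.ones(bsz, 1, seq_len, seq_len))
pos = torch.arange(seq_len).unsqueeze(0).expand_as(input_ids).clone()

for b in range(bsz):
    prev = 0
    for i in (input_ids[b] == eod_token).nonzero(as_tuple=True)[0]:
        mask[b, 0, i + 1:, : i + 1] = 0   # no attention across <eod>
        pos[b, i + 1:] -= i + 1 - prev    # restart positions after <eod>
        prev = i + 1

print(pos)         # tensor([[0, 1, 2, 0, 1, 2]])
print(mask[0, 0])  # block-diagonal causal mask, one block per document
```

The model then inverts this mask and fills the blocked entries with `torch.finfo(dtype).min` before handing it to the decoder layers.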
1166
+ class YuanForCausalLM(YuanPreTrainedModel):
1167
+ def __init__(self, config):
1168
+ super().__init__(config)
1169
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1170
+ self.model = YuanModel(config)
1171
+ self.post_init()
1172
+
1173
+ def get_input_embeddings(self):
1174
+ return self.model.embed_tokens
1175
+
1176
+ def set_input_embeddings(self, value):
1177
+ self.model.embed_tokens = value
1178
+
1179
+ def get_output_embeddings(self):
1180
+ return self.lm_head
1181
+
1182
+ def set_output_embeddings(self, new_embeddings):
1183
+ self.lm_head = new_embeddings
1184
+
1185
+ def set_decoder(self, decoder):
1186
+ self.model = decoder
1187
+
1188
+ def get_decoder(self):
1189
+ return self.model
1190
+
1191
+ def get_loss_mask(self, input_ids, labels, eod_token, sep_token):
1192
+ micro_batch_size, seq_length = input_ids.size()
1193
+ loss_mask = torch.ones(input_ids.size(), dtype=torch.float, device=input_ids.device)
1194
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
1195
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
1196
+
1197
+
1198
+ """modify loss_mask to only calculate the loss of the answer (separated with [SEP])"""
1199
+
1200
+ for b in range(micro_batch_size):
1201
+ eod_indexs = position_ids[b, input_ids[b] == eod_token]
1202
+ sep_indexs = position_ids[b, input_ids[b] == sep_token]
1203
+
1204
+ if len(eod_indexs) == 0 or len(sep_indexs) == 0:
1205
+ loss_mask[b] = 1.0
1206
+ else:
1207
+ if eod_indexs[0] > sep_indexs[0]:
1208
+ loss_mask[b, 0:sep_indexs[0]] = 0
1209
+
1210
+ if len(eod_indexs) == len(sep_indexs):
1211
+ for ii, eod_index in enumerate(eod_indexs):
1212
+ start_index = eod_index
1213
+ if ii == (len(sep_indexs) - 1):
1214
+ stop_index = seq_length
1215
+ else:
1216
+ stop_index = sep_indexs[ii + 1]
1217
+ loss_mask[b, start_index:stop_index] = 0.0
1218
+ else:
1219
+ if len(eod_indexs) > len(sep_indexs):
1220
+ loss_mask[b,:] = 1.0
1221
+ else:
1222
+ for ii, eod_index in enumerate(eod_indexs):
1223
+ start_index = eod_index
1224
+ stop_index = sep_indexs[ii + 1]
1225
+
1226
+ loss_mask[b, start_index:stop_index] = 0.0
1227
+
1228
+ elif eod_indexs[0] < sep_indexs[0]:
1229
+
1230
+ if len(eod_indexs) == len(sep_indexs):
1231
+ for ii, eod_index in enumerate(eod_indexs):
1232
+ start_index = eod_index
1233
+ stop_index = sep_indexs[ii]
1234
+ loss_mask[b, start_index:stop_index] = 0.0
1235
+
1236
+ else:
1237
+ if len(eod_indexs) < len(sep_indexs):
1238
+ loss_mask[b,:] = 1.0
1239
+ else:
1240
+ for ii, eod_index in enumerate(eod_indexs):
1241
+ start_index = eod_index
1242
+ if ii >= len(sep_indexs):
1243
+ stop_index = seq_length
1244
+ else:
1245
+ stop_index = sep_indexs[ii]
1246
+ loss_mask[b, start_index:stop_index] = 0.0
1247
+
1248
+ loss_mask[input_ids == eod_token] = 1.0
1249
+ return loss_mask
1250
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1251
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1252
+ def forward(
1253
+ self,
1254
+ input_ids: torch.LongTensor = None,
1255
+ attention_mask: Optional[torch.Tensor] = None,
1256
+ position_ids: Optional[torch.LongTensor] = None,
1257
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1258
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1259
+ labels: Optional[torch.LongTensor] = None,
1260
+ use_cache: Optional[bool] = None,
1261
+ output_attentions: Optional[bool] = None,
1262
+ output_hidden_states: Optional[bool] = None,
1263
+ return_dict: Optional[bool] = None,
1264
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1265
+ """
1266
+ ## modified: router logits removed from the outputs
1267
+ Args:
1268
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1269
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1270
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1271
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1272
+
1273
+ Returns:
1274
+
1275
+ Example:
1276
+
1277
+ ```python
1278
+ >>> from transformers import AutoTokenizer, YuanForCausalLM
1279
+
1280
+ >>> model = YuanForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1281
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1282
+
1283
+ >>> prompt = "Hey, are you consciours? Can you talk to me?"
1284
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1285
+
1286
+ >>> # Generate
1287
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1288
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1289
+ "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
1290
+ ```"""
1291
+
1292
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1293
+
1294
+ output_hidden_states = (
1295
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1296
+ )
1297
+
1298
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1299
+
1300
+ outputs = self.model(
1301
+ input_ids=input_ids,
1302
+ attention_mask=attention_mask,
1303
+ position_ids=position_ids,
1304
+ past_key_values=past_key_values,
1305
+ inputs_embeds=inputs_embeds,
1306
+ use_cache=use_cache,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ )
1311
+
1312
+ hidden_states = outputs[0].transpose(0,1)
1313
+
1314
+ logits = self.lm_head(hidden_states)
1315
+
1316
+ loss = None
1317
+ if labels is not None:
1318
+ if getattr(self.config, 'use_loss_mask', False):
1319
+ loss_mask = self.get_loss_mask(input_ids, labels, self.config.eod_token, self.config.sep_token)
1320
+ # Shift so that tokens < n predict n
1321
+ shift_logits = logits[..., :-1, :].contiguous()
1322
+ shift_labels = labels[..., 1:].contiguous()
1323
+ # Flatten the tokens
1324
+ if getattr(self.config, 'use_loss_mask', False):
1325
+ loss_fct = CrossEntropyLoss(reduction='none')
1326
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1327
+ shift_labels = shift_labels.view(-1)
1328
+ shift_labels = shift_labels.to(shift_logits.device)  # enable model parallelism
1329
+ loss_mask = loss_mask[..., 1:].contiguous().view(-1).to(shift_logits.device)  # align the mask with the shifted labels
1330
+ loss = loss_fct(shift_logits, shift_labels)
1331
+ loss = torch.sum(loss * loss_mask) / loss_mask.sum()
1332
+ else:
1333
+ loss_fct = CrossEntropyLoss()
1334
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1335
+ shift_labels = shift_labels.view(-1)
1336
+ # Enable model parallelism
1337
+ shift_labels = shift_labels.to(shift_logits.device)
1338
+ loss = loss_fct(shift_logits, shift_labels)
1339
+ if not return_dict:
1340
+ output = (logits,) + outputs[1:]
1341
+ return (loss,) + output if loss is not None else output
1342
+
1343
+ return CausalLMOutputWithPast(
1344
+ loss=loss,
1345
+ logits=logits,
1346
+ past_key_values=outputs.past_key_values,
1347
+ hidden_states=hidden_states,
1348
+ attentions=outputs.attentions,
1349
+ )
1350
+
1351
+ def prepare_inputs_for_generation(
1352
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1353
+ ):
1354
+
1355
+ position_ids = kwargs.get("position_ids", None)
1356
+ if attention_mask is not None and position_ids is None:
1357
+ # create position_ids on the fly for batch generation
1358
+ position_ids = attention_mask.long().cumsum(-1) - 1
1359
+ position_ids.masked_fill_(attention_mask == 0, 1)
1360
+ if past_key_values:
1361
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1362
+
1363
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1364
+ if inputs_embeds is not None and past_key_values is None:
1365
+ model_inputs = {"inputs_embeds": inputs_embeds}
1366
+ else:
1367
+ model_inputs = {"input_ids": input_ids}
1368
+
1369
+ model_inputs.update(
1370
+ {
1371
+ "position_ids": position_ids,
1372
+ "past_key_values": past_key_values,
1373
+ "use_cache": kwargs.get("use_cache"),
1374
+ "attention_mask": attention_mask,
1375
+ }
1376
+ )
1377
+ return model_inputs
1378
+
1379
+ @staticmethod
1380
+ def _reorder_cache(past_key_values, beam_idx):
1381
+ reordered_past = ()
1382
+ for layer_past in past_key_values:
1383
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1384
+ return reordered_past
1385
+
1386
+
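`get_loss_mask` above implements answer-only supervision for data laid out as `prompt <sep> answer <eod> ...`: prompt spans get mask 0, answer spans and the `<eod>` markers keep mask 1. A minimal sketch of the common case (first `<sep>` before the first `<eod>`, equal counts of both), using illustrative `<sep>`/`<eod>` ids rather than the tokenizer's real ones:

```python
import torch

# Layout: prompt1 <sep> answer1 <eod> prompt2 <sep> answer2 <eod>
sep, eod = 10, 2  # illustrative ids
ids = torch.tensor([[4, 5, 10, 7, 8, 2, 6, 10, 9, 2]])

loss_mask = torch.ones_like(ids, dtype=torch.float)
pos = torch.arange(ids.shape[1])
sep_idx = pos[ids[0] == sep]  # tensor([2, 7])
eod_idx = pos[ids[0] == eod]  # tensor([5, 9])

loss_mask[0, : sep_idx[0]] = 0.0      # mask the first prompt
for ii, e in enumerate(eod_idx):      # mask each following prompt
    stop = sep_idx[ii + 1] if ii + 1 < len(sep_idx) else ids.shape[1]
    loss_mask[0, e:stop] = 0.0
loss_mask[0, ids[0] == eod] = 1.0     # <eod> itself stays supervised

print(loss_mask)  # tensor([[0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]])
```

In `forward`, the per-token cross-entropy is then averaged over the surviving positions, with the mask shifted by one so it lines up with the next-token labels.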
1387
+ @add_start_docstrings(
1388
+ """
1389
+ The Yuan Model transformer with a sequence classification head on top (linear layer).
1390
+
1391
+ [`YuanForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1392
+ (e.g. GPT-2) do.
1393
+
1394
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1395
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1396
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1397
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1398
+ each row of the batch).
1399
+ """,
1400
+ YUAN_START_DOCSTRING,
1401
+ )
1402
+ class YuanForSequenceClassification(YuanPreTrainedModel):
1403
+ #_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
1404
+
1405
+ def __init__(self, config):
1406
+ super().__init__(config)
1407
+ self.num_labels = config.num_labels
1408
+ self.model = YuanModel(config)
1409
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1410
+
1411
+ # Initialize weights and apply final processing
1412
+ self.post_init()
1413
+
1414
+ def get_input_embeddings(self):
1415
+ return self.model.embed_tokens
1416
+
1417
+ def set_input_embeddings(self, value):
1418
+ self.model.embed_tokens = value
1419
+
1420
+ @add_start_docstrings_to_model_forward(YUAN_INPUTS_DOCSTRING)
1421
+ def forward(
1422
+ self,
1423
+ input_ids: torch.LongTensor = None,
1424
+ attention_mask: Optional[torch.Tensor] = None,
1425
+ position_ids: Optional[torch.LongTensor] = None,
1426
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1427
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1428
+ labels: Optional[torch.LongTensor] = None,
1429
+ use_cache: Optional[bool] = None,
1430
+ output_attentions: Optional[bool] = None,
1431
+ output_hidden_states: Optional[bool] = None,
1432
+ return_dict: Optional[bool] = None,
1433
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1434
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1435
+ transformer_outputs = self.model(
1436
+ input_ids,
1437
+ attention_mask=attention_mask,
1438
+ position_ids=position_ids,
1439
+ past_key_values=past_key_values,
1440
+ inputs_embeds=inputs_embeds,
1441
+ use_cache=use_cache,
1442
+ output_attentions=output_attentions,
1443
+ output_hidden_states=output_hidden_states,
1444
+ return_dict=return_dict,
1445
+ )
1446
+ hidden_states = transformer_outputs[0]
1447
+ logits = self.score(hidden_states)
1448
+
1449
+ if input_ids is not None:
1450
+ batch_size = input_ids.shape[0]
1451
+ else:
1452
+ batch_size = inputs_embeds.shape[0]
1453
+
1454
+ if self.config.pad_token_id is None and batch_size != 1:
1455
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1456
+ if self.config.pad_token_id is None:
1457
+ sequence_lengths = -1
1458
+ else:
1459
+ if input_ids is not None:
1460
+ sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
1461
+ else:
1462
+ sequence_lengths = -1
1463
+
1464
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1465
+
1466
+ loss = None
1467
+ if labels is not None:
1468
+ labels = labels.to(logits.device)
1469
+ if self.config.problem_type is None:
1470
+ if self.num_labels == 1:
1471
+ self.config.problem_type = "regression"
1472
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1473
+ self.config.problem_type = "single_label_classification"
1474
+ else:
1475
+ self.config.problem_type = "multi_label_classification"
1476
+
1477
+ if self.config.problem_type == "regression":
1478
+ loss_fct = MSELoss()
1479
+ if self.num_labels == 1:
1480
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1481
+ else:
1482
+ loss = loss_fct(pooled_logits, labels)
1483
+ elif self.config.problem_type == "single_label_classification":
1484
+ loss_fct = CrossEntropyLoss()
1485
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1486
+ elif self.config.problem_type == "multi_label_classification":
1487
+ loss_fct = BCEWithLogitsLoss()
1488
+ loss = loss_fct(pooled_logits, labels)
1489
+ if not return_dict:
1490
+ output = (pooled_logits,) + transformer_outputs[1:]
1491
+ return ((loss,) + output) if loss is not None else output
1492
+
1493
+ return SequenceClassifierOutputWithPast(
1494
+ loss=loss,
1495
+ logits=pooled_logits,
1496
+ past_key_values=transformer_outputs.past_key_values,
1497
+ hidden_states=transformer_outputs.hidden_states,
1498
+ attentions=transformer_outputs.attentions,
1499
+ )
1500
+
1501
+
1502
+
modeling_yuanvl_chat.py ADDED
@@ -0,0 +1,363 @@
1
+ # --------------------------------------------------------
2
+ # YuanVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import warnings
8
+ from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional,
9
+ Set, Tuple, Type, TypedDict, Union)
10
+
11
+ import torch.utils.checkpoint
12
+ import transformers
13
+ import torch
14
+ from torch import nn
15
+ from torch.nn import CrossEntropyLoss
16
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
17
+ LlamaTokenizer)
18
+ from transformers.modeling_outputs import CausalLMOutputWithPast
19
+ from transformers.modeling_utils import PreTrainedModel
20
+ from transformers.utils import ModelOutput, logging
21
+
22
+ from transformer_engine.pytorch import RMSNorm
23
+ from transformers.activations import ACT2FN
24
+
25
+ from .configuration_yuanvl import YuanVLChatConfig
26
+ from .conversation import get_conv_template
27
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
28
+ from .modeling_yuanlm2 import YuanForCausalLM
29
+ from .utils import flatten_bn, merge_multimodal_embeddings
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ class InternVLImagePixelInputs(TypedDict):
35
+ type: Literal["pixel_values"]
36
+ data: Union[torch.Tensor, List[torch.Tensor]]
37
+ """
38
+ Shape: `(batch_size, 1 + num_patches, num_channels, height, width)`
39
+
40
+ Note that `num_patches` may be different for each batch, in which case
41
+ the data is passed as a list instead of a batched tensor.
42
+ """
43
+ patches_per_image: List[int]
44
+ """
45
+ List of number of total patches for each image in the batch.
46
+ """
47
+
48
+
49
+ class InternVLImageEmbeddingInputs(TypedDict):
50
+ type: Literal["image_embeds"]
51
+ data: Any # in vLLM's vision code this is a NestedTensors
52
+ """
53
+ A tensor of shape `(num_images, total_image_feature_size, hidden_size)`
54
+ or a list of tensors of shape `(total_image_feature_size, hidden_size)`
55
+
56
+ `hidden_size` must match the hidden size of language model backbone.
57
+ """
58
+
59
+
60
+ InternVLImageInputs = Union[InternVLImagePixelInputs,
61
+ InternVLImageEmbeddingInputs]
62
+
63
+
64
+ def version_cmp(v1, v2, op='eq'):
65
+ import operator
66
+
67
+ from packaging import version
68
+ op_func = getattr(operator, op)
69
+ return op_func(version.parse(v1), version.parse(v2))
70
+
71
+ class YuanImageMLP(nn.Module):
72
+
73
+ def __init__(
74
+ self,
75
+ hidden_size: int,
76
+ intermediate_size: int,
77
+ output_size: int,
78
+ hidden_act: str,
79
+ ) -> None:
80
+ super().__init__()
81
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
82
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
83
+ self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
84
+
85
+ if hidden_act != "silu":
86
+ raise ValueError(f"Unsupported activation: {hidden_act}. Only silu is supported for now.")
87
+
88
+ self.act_fn = ACT2FN[hidden_act]
89
+
90
+ @torch.compile
91
+ def swiglu(self, y_1, y_2):
92
+ return self.act_fn(y_1) * y_2
93
+
94
+ def forward(self, x):
95
+ x1 = self.up_proj(x)
96
+ x2 = self.gate_proj(x)
97
+ x3 = self.swiglu(x1, x2)
98
+ x = self.down_proj(x3)
99
+ return x
100
+
101
+ class YuanVLChatModel(PreTrainedModel):
102
+ config_class = YuanVLChatConfig
103
+ main_input_name = 'pixel_values'
104
+ base_model_prefix = 'language_model'
105
+ _supports_flash_attn_2 = True
106
+ _no_split_modules = ['InternVisionModel', 'YuanDecoderLayer']
107
+
108
+ def __init__(self, config: YuanVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
109
+ super().__init__(config)
110
+
111
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
112
+ image_size = config.force_image_size or config.vision_config.image_size
113
+ patch_size = config.vision_config.patch_size
114
+ self.patch_size = patch_size
115
+ self.select_layer = config.select_layer
116
+ self.template = config.template
117
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
118
+ self.downsample_ratio = config.downsample_ratio
119
+ self.ps_version = config.ps_version
120
+ use_flash_attn = use_flash_attn if has_flash_attn else False
121
+ config.vision_config.use_flash_attn = use_flash_attn
122
+ config.llm_config._attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
123
+
124
+ logger.info(f'num_image_token: {self.num_image_token}')
125
+ logger.info(f'ps_version: {self.ps_version}')
126
+ if vision_model is not None:
127
+ self.vision_model = vision_model
128
+ else:
129
+ self.vision_model = InternVisionModel(config.vision_config)
130
+ if language_model is not None:
131
+ self.language_model = language_model
132
+ else:
133
+ if config.llm_config.architectures[0] == 'YuanForCausalLM':
134
+ self.language_model = YuanForCausalLM(config.llm_config)
135
+ else:
136
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
137
+
138
+ self.pixel_unshuffle = torch.nn.PixelUnshuffle(downscale_factor=2)
139
+ layernorm_epsilon = config.llm_config.rms_norm_eps
140
+
141
+ self.imagemlp_input_hiddensize = int(config.vision_config.hidden_size / self.downsample_ratio ** 2)
142
+ self.imagemlp_ffn_hidden_size = config.llm_config.ffn_hidden_size
143
+
144
+ self.imagemlp = YuanImageMLP(self.imagemlp_input_hiddensize, self.imagemlp_ffn_hidden_size,
145
+ output_size=config.llm_config.hidden_size, hidden_act="silu")
146
+ self.imagemlp_layernorm = RMSNorm(config.llm_config.hidden_size, eps=layernorm_epsilon)
147
+
148
+ self.img_context_token_id = config.img_context_token_id
149
+ self.conv_template = get_conv_template(self.template)
150
+ self.system_message = self.conv_template.system_message
151
+
152
+ def _validate_pixel_values(self,
153
+ data: Union[torch.Tensor, List[torch.Tensor]]
154
+ ) -> Union[torch.Tensor, List[torch.Tensor]]:
155
+
156
+ h = w = self.config.vision_config.image_size
157
+ expected_dims = (3, h, w)
158
+
159
+ def _validate_shape(d: torch.Tensor):
160
+ actual_dims = tuple(d.shape)
161
+ if actual_dims != expected_dims:
162
+ # Describe the expected dims in the error message below.
163
+ expected_expr = str(expected_dims)
164
+ raise ValueError("The expected shape of pixel values in each batch element "
165
+ f"is {expected_expr}. You supplied {tuple(d.shape)}.")
166
+ for d in data:
167
+ _validate_shape(d)
168
+ return data
169
+
170
+
171
+
172
+ def _parse_and_validate_image_input(self,
173
+ pixel_values: List[torch.Tensor] = None,
174
+ image_token_id: torch.Tensor = None,
175
+ image_embeds: torch.Tensor = None,
176
+ ) -> Optional[InternVLImagePixelInputs]:
177
+ if pixel_values is None and image_embeds is None:
178
+ return None
179
+
180
+ if image_embeds is not None:
181
+ if not isinstance(image_embeds, torch.Tensor):
182
+ raise ValueError("Incorrect type of image embeddings. "
183
+ f"Got type: {type(image_embeds)}")
184
+ return InternVLImageEmbeddingInputs(
185
+ type="image_embeds",
186
+ data=flatten_bn(image_embeds),
187
+ )
188
+
189
+ if pixel_values is not None:
190
+ if not isinstance(pixel_values, (torch.Tensor, list)):
191
+ raise ValueError("Incorrect type of pixel values. "
192
+ f"Got type: {type(pixel_values)}")
193
+ patches_per_image = []
194
+ for request_pixel_values in pixel_values:
195
+ patches_per_image.append(request_pixel_values.shape[0])
196
+
197
+ # Flatten the per-request lists of patches into a single
198
+ # batch for the vision tower via flatten_bn.
199
+ # resulting shape: (total_patches, 3, h, w)
200
+ return InternVLImagePixelInputs(
201
+ type="pixel_values",
202
+ data=self._validate_pixel_values(flatten_bn(pixel_values)),
203
+ patches_per_image=patches_per_image)
204
+ raise AssertionError("This line should be unreachable")
205
+
206
+ def _process_image_input(
207
+ self,
208
+ image_input: InternVLImageInputs,
209
+ ) -> Tuple[torch.Tensor] :
210
+ if image_input["type"] == "image_embeds":
211
+ return image_input["data"]
212
+ assert self.vision_model is not None
213
+ # (total_patches, tokens_per_image, llm_config.hidden_size)
214
+ image_embeds = self.extract_feature(image_input["data"])
215
+
216
+ patches_per_image = image_input["patches_per_image"]
217
+
218
+ # Only one image in the current batch
219
+ if len(patches_per_image) == 1:
220
+ image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size).unsqueeze(1)
221
+ return image_embeds
222
+ # NOTE: Image embeddings are split into separate tensors for each image
223
+ # by the size of each embedding.
224
+ # feature_size: token positions per patch (256 here)
225
+ feature_size = image_embeds.shape[1]
226
+ # (total_image_tokens, llm_config.hidden_size)
227
+ image_embeds = image_embeds.view(-1, self.config.llm_config.hidden_size)
228
+ image_feature_sizes = [num_patches * feature_size for num_patches in patches_per_image]
229
+ image_embeds = image_embeds.split(image_feature_sizes)
230
+
231
+ return image_embeds
232
+
233
+
234
+
235
+ def get_multimodal_embeddings(self,
236
+ pixel_values: Optional[List[torch.Tensor]] = None,
237
+ image_token_id: Optional[List[torch.Tensor]] = None,
238
+ image_embeds: Optional[List[torch.Tensor]] = None,
239
+ image_input: InternVLImageInputs = None,
240
+ ):
241
+ image_input = self._parse_and_validate_image_input(pixel_values, image_token_id, image_embeds)
242
+ if image_input is None:
243
+ return None
244
+
245
+ # image_input: (total_patches, 3, h, w)
246
+ vision_embeddings = self._process_image_input(image_input)
247
+ return vision_embeddings
248
+
249
+ def get_input_embeddings(
250
+ self,
251
+ input_ids: torch.Tensor,
252
+ multimodal_embeddings: Optional[torch.Tensor]
253
+ ) -> torch.Tensor:
254
+ inputs_embeds = self.language_model.model.get_input_embeddings(input_ids)
255
+ if multimodal_embeddings is not None:
256
+ assert self.img_context_token_id is not None
257
+ inputs_embeds = merge_multimodal_embeddings(
258
+ input_ids, inputs_embeds, multimodal_embeddings,
259
+ self.img_context_token_id)
260
+ return inputs_embeds
261
+
262
+ def forward(
263
+ self,
264
+ input_ids: torch.LongTensor = None,
265
+ attention_mask: torch.Tensor = None,
266
+ position_ids: torch.LongTensor = None,
267
+ past_key_values: List[torch.FloatTensor] = None,
268
+ inputs_embeds: Optional[torch.FloatTensor] = None,
269
+ labels: Optional[torch.LongTensor] = None,
270
+ use_cache: Optional[bool] = None,
271
+ output_attentions: Optional[bool] = None,
272
+ output_hidden_states: Optional[bool] = None,
273
+ return_dict: Optional[bool] = None,
274
+ pixel_values: Optional[List[torch.Tensor]] = None,
275
+ image_token_id: Optional[List[torch.Tensor]] = None,
276
+ image_embeds: Optional[List[torch.Tensor]] = None,
277
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
278
+
279
+ if inputs_embeds is None:
280
+ # (images, patches * token_per_image)
281
+ vision_embeddings = self.get_multimodal_embeddings(pixel_values, image_token_id, image_embeds)
282
+ # (tokens, hidden_size)
283
+ inputs_embeds = self.get_input_embeddings(input_ids, vision_embeddings).permute(1, 0, 2)
284
+ input_ids = None
285
+
286
+ hidden_states = self.language_model.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids,
287
+ past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions,
288
+ output_hidden_states=output_hidden_states, return_dict=return_dict)  # note: YuanModel.forward takes no labels argument
289
+
290
+ return hidden_states
291
+
292
+ def pixel_shuffle(self, x, scale_factor=0.5):
293
+ n, w, h, c = x.size()
294
+ # N, W, H, C --> N, W, H * scale, C // scale
295
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
296
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
297
+ x = x.permute(0, 2, 1, 3).contiguous()
298
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
299
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
300
+ int(c / (scale_factor * scale_factor)))
301
+ if self.ps_version == 'v1':
302
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
303
+ 'which results in a transposed image.')
304
+ else:
305
+ x = x.permute(0, 2, 1, 3).contiguous()
306
+ return x
307
+
308
+ # Internvl vision
309
+ def extract_feature(self, pixel_values):
310
+ # pixel_values: (batch_size * num_images, channels, height, width)
311
+ pixel_values = pixel_values.to(torch.bfloat16)
312
+ output = self.vision_model(pixel_values=pixel_values)
313
+ vit_embeds = output[0]
314
+ # vit_embeds: (batch_size * num_images, h*w, vit_dim)
315
+ vit_embeds = vit_embeds[:, 1:, :]
316
+
317
+ pn, phw, pc = vit_embeds.shape
318
+ ph = pw = int(phw**0.5)
319
+ vit_embeds = vit_embeds.view(pn, ph, pw, pc).permute(0, 3, 1, 2)
320
+ vit_embeds = self.pixel_unshuffle(vit_embeds)
321
+ pn, pc, ph, pw = vit_embeds.shape
322
+ vit_embeds = vit_embeds.view(pn, pc, ph * pw).permute(0, 2, 1)
323
+ num_images, cvs, chs = vit_embeds.shape
324
+ vit_embeds = vit_embeds.reshape(1, -1, vit_embeds.shape[-1]).permute(1, 0, 2)
325
+ vit_embeds = self.imagemlp(vit_embeds)
326
+ vit_embeds = self.imagemlp_layernorm(vit_embeds)
327
+ vit_embeds = vit_embeds.view(num_images, cvs, -1)
328
+ return vit_embeds
329
+
330
+ @torch.no_grad()
331
+ def generate(
332
+ self,
333
+ pixel_values: Optional[torch.FloatTensor] = None,
334
+ input_ids: Optional[torch.FloatTensor] = None,
335
+ attention_mask: Optional[torch.LongTensor] = None,
336
+ visual_features: Optional[torch.FloatTensor] = None,
337
+ generation_config: Optional[GenerationConfig] = None,
338
+ position_ids: Optional[torch.Tensor] = None,
339
+ output_hidden_states: Optional[bool] = None,
340
+ ) -> torch.LongTensor:
341
+
342
+
343
+ if pixel_values is not None:
344
+ if visual_features is not None:
345
+ vit_embeds = visual_features
346
+ else:
347
+ vit_embeds = self.get_multimodal_embeddings(pixel_values)
348
+ inputs_embeds = self.get_input_embeddings(input_ids, vit_embeds)
349
+ input_ids = None
350
+ else:
351
+ inputs_embeds = self.get_input_embeddings(input_ids, None)  # text-only path
352
+ outputs = self.language_model.generate(
353
+ inputs_embeds=inputs_embeds,
354
+ attention_mask=attention_mask,
355
+ generation_config=generation_config,
356
+ output_hidden_states=output_hidden_states,
357
+ position_ids=position_ids,
358
+ max_length=8192,
359
+ use_cache=True,
360
+ )
361
+
362
+
363
+ return outputs
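For reference, the vision-token arithmetic behind `num_image_token` and `extract_feature` above: InternViT emits one embedding per image patch, `PixelUnshuffle(2)` folds each 2x2 patch neighborhood into the channel dimension (the `downsample_ratio` of 0.5), and `YuanImageMLP` projects the widened channels into the LLM hidden size. A minimal shape walk-through, assuming a 448x448 image, patch size 14, and ViT width 1024 (typical InternViT values, not read from this repo's config):

```python
import torch

# Illustrative InternViT-style values; the real ones come from the config.
image_size, patch_size, downsample_ratio = 448, 14, 0.5
vit_hidden = 1024

grid = image_size // patch_size                        # 32 patches per side
num_image_token = int(grid ** 2 * downsample_ratio ** 2)
print(num_image_token)                                 # 256 LLM tokens per image

# One image's patch embeddings after dropping the CLS token: (N, H*W, C).
vit_embeds = torch.randn(1, grid * grid, vit_hidden)

# (N, H*W, C) -> (N, C, H, W) so PixelUnshuffle can fold 2x2 neighborhoods.
x = vit_embeds.view(1, grid, grid, vit_hidden).permute(0, 3, 1, 2)
x = torch.nn.PixelUnshuffle(2)(x)                      # (1, 4096, 16, 16)
x = x.flatten(2).permute(0, 2, 1)                      # (1, 256, 4096)
print(x.shape)
```

This is why the constructor sets `imagemlp_input_hiddensize = vision_hidden_size / downsample_ratio ** 2`: the unshuffle trades 4x fewer tokens for 4x more channels before the projection.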
special_tokens_map.json ADDED
@@ -0,0 +1,1092 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<s>",
4
+ "<eod>",
5
+ "<unk>",
6
+ "<sep>",
7
+ "<pad>",
8
+ "<mask>",
9
+ "<predict>",
10
+ "<FIM_SUFFIX>",
11
+ "<FIM_PREFIX>",
12
+ "<FIM_MIDDLE>",
13
+ "<commit_before>",
14
+ "<commit_msg>",
15
+ "<commit_after>",
16
+ "<jupyter_start>",
17
+ "<jupyter_text>",
18
+ "<jupyter_code>",
19
+ "<jupyter_output>",
20
+ "<empty_output>",
21
+ "<repo_name>",
22
+ "<file_sep>",
23
+ "<BOS>",
24
+ "<IMAGE>",
25
+ "</IMAGE>",
26
+ "<grounding>",
27
+ "<obj>",
28
+ "</obj>",
29
+ "<box>",
30
+ "</box>",
31
+ "<point>",
32
+ "</point>",
33
+ "<3dbox>",
34
+ "</3dbox>",
35
+ "<depth>",
36
+ "</depth>",
37
+ "s000",
38
+ "s001",
39
+ "s002",
40
+ "s003",
41
+ "s004",
42
+ "s005",
43
+ "s006",
44
+ "s007",
45
+ "s008",
46
+ "s009",
47
+ "s010",
48
+ "s011",
49
+ "s012",
50
+ "s013",
51
+ "s014",
52
+ "s015",
53
+ "s016",
54
+ "s017",
55
+ "s018",
56
+ "s019",
57
+ "s020",
58
+ "s021",
59
+ "s022",
60
+ "s023",
61
+ "s024",
62
+ "s025",
63
+ "s026",
64
+ "s027",
65
+ "s028",
66
+ "s029",
67
+ "s030",
68
+ "s031",
69
+ "s032",
70
+ "s033",
71
+ "s034",
72
+ "s035",
73
+ "s036",
74
+ "s037",
75
+ "s038",
76
+ "s039",
77
+ "s040",
78
+ "s041",
79
+ "s042",
80
+ "s043",
81
+ "s044",
82
+ "s045",
83
+ "s046",
84
+ "s047",
85
+ "s048",
86
+ "s049",
87
+ "s050",
88
+ "s051",
89
+ "s052",
90
+ "s053",
91
+ "s054",
92
+ "s055",
93
+ "s056",
94
+ "s057",
95
+ "s058",
96
+ "s059",
97
+ "s060",
98
+ "s061",
99
+ "s062",
100
+ "s063",
101
+ "s064",
102
+ "s065",
103
+ "s066",
104
+ "s067",
105
+ "s068",
106
+ "s069",
107
+ "s070",
108
+ "s071",
109
+ "s072",
110
+ "s073",
111
+ "s074",
112
+ "s075",
113
+ "s076",
114
+ "s077",
115
+ "s078",
116
+ "s079",
117
+ "s080",
118
+ "s081",
119
+ "s082",
120
+ "s083",
121
+ "s084",
122
+ "s085",
123
+ "s086",
124
+ "s087",
125
+ "s088",
126
+ "s089",
127
+ "s090",
128
+ "s091",
129
+ "s092",
130
+ "s093",
131
+ "s094",
132
+ "s095",
133
+ "s096",
134
+ "s097",
135
+ "s098",
136
+ "s099",
137
+ "s100",
138
+ "s101",
139
+ "s102",
140
+ "s103",
141
+ "s104",
142
+ "s105",
143
+ "s106",
144
+ "s107",
145
+ "s108",
146
+ "s109",
147
+ "s110",
148
+ "s111",
149
+ "s112",
150
+ "s113",
151
+ "s114",
152
+ "s115",
153
+ "s116",
154
+ "s117",
155
+ "s118",
156
+ "s119",
157
+ "s120",
158
+ "s121",
159
+ "s122",
160
+ "s123",
161
+ "s124",
162
+ "s125",
163
+ "s126",
164
+ "s127",
165
+ "s128",
166
+ "s129",
167
+ "s130",
168
+ "s131",
169
+ "s132",
170
+ "s133",
171
+ "s134",
172
+ "s135",
173
+ "s136",
174
+ "s137",
175
+ "s138",
176
+ "s139",
177
+ "s140",
178
+ "s141",
179
+ "s142",
180
+ "s143",
181
+ "s144",
182
+ "s145",
183
+ "s146",
184
+ "s147",
185
+ "s148",
186
+ "s149",
187
+ "s150",
188
+ "s151",
189
+ "s152",
190
+ "s153",
191
+ "s154",
192
+ "s155",
193
+ "s156",
194
+ "s157",
195
+ "s158",
196
+ "s159",
197
+ "s160",
198
+ "s161",
199
+ "s162",
200
+ "s163",
201
+ "s164",
202
+ "s165",
203
+ "s166",
204
+ "s167",
205
+ "s168",
206
+ "s169",
207
+ "s170",
208
+ "s171",
209
+ "s172",
210
+ "s173",
211
+ "s174",
212
+ "s175",
213
+ "s176",
214
+ "s177",
215
+ "s178",
216
+ "s179",
217
+ "s180",
218
+ "s181",
219
+ "s182",
220
+ "s183",
221
+ "s184",
222
+ "s185",
223
+ "s186",
224
+ "s187",
225
+ "s188",
226
+ "s189",
227
+ "s190",
228
+ "s191",
229
+ "s192",
230
+ "s193",
231
+ "s194",
232
+ "s195",
233
+ "s196",
234
+ "s197",
235
+ "s198",
236
+ "s199",
237
+ "s200",
238
+ "s201",
239
+ "s202",
240
+ "s203",
241
+ "s204",
242
+ "s205",
243
+ "s206",
244
+ "s207",
245
+ "s208",
246
+ "s209",
247
+ "s210",
248
+ "s211",
249
+ "s212",
250
+ "s213",
251
+ "s214",
252
+ "s215",
253
+ "s216",
254
+ "s217",
255
+ "s218",
256
+ "s219",
257
+ "s220",
258
+ "s221",
259
+ "s222",
260
+ "s223",
261
+ "s224",
262
+ "s225",
263
+ "s226",
264
+ "s227",
265
+ "s228",
266
+ "s229",
267
+ "s230",
268
+ "s231",
269
+ "s232",
270
+ "s233",
271
+ "s234",
272
+ "s235",
273
+ "s236",
274
+ "s237",
275
+ "s238",
276
+ "s239",
277
+ "s240",
278
+ "s241",
279
+ "s242",
280
+ "s243",
281
+ "s244",
282
+ "s245",
283
+ "s246",
284
+ "s247",
285
+ "s248",
286
+ "s249",
287
+ "s250",
288
+ "s251",
289
+ "s252",
290
+ "s253",
291
+ "s254",
292
+ "s255",
293
+ "s256",
294
+ "s257",
295
+ "s258",
296
+ "s259",
297
+ "s260",
298
+ "s261",
299
+ "s262",
300
+ "s263",
301
+ "s264",
302
+ "s265",
303
+ "s266",
304
+ "s267",
305
+ "s268",
306
+ "s269",
307
+ "s270",
308
+ "s271",
309
+ "s272",
310
+ "s273",
311
+ "s274",
312
+ "s275",
313
+ "s276",
314
+ "s277",
315
+ "s278",
316
+ "s279",
317
+ "s280",
318
+ "s281",
319
+ "s282",
320
+ "s283",
321
+ "s284",
322
+ "s285",
323
+ "s286",
324
+ "s287",
325
+ "s288",
326
+ "s289",
327
+ "s290",
328
+ "s291",
329
+ "s292",
330
+ "s293",
331
+ "s294",
332
+ "s295",
333
+ "s296",
334
+ "s297",
335
+ "s298",
336
+ "s299",
337
+ "s300",
338
+ "s301",
339
+ "s302",
340
+ "s303",
341
+ "s304",
342
+ "s305",
343
+ "s306",
344
+ "s307",
345
+ "s308",
346
+ "s309",
347
+ "s310",
348
+ "s311",
349
+ "s312",
350
+ "s313",
351
+ "s314",
352
+ "s315",
353
+ "s316",
354
+ "s317",
355
+ "s318",
356
+ "s319",
357
+ "s320",
358
+ "s321",
359
+ "s322",
360
+ "s323",
361
+ "s324",
362
+ "s325",
363
+ "s326",
364
+ "s327",
365
+ "s328",
366
+ "s329",
367
+ "s330",
368
+ "s331",
369
+ "s332",
370
+ "s333",
371
+ "s334",
372
+ "s335",
373
+ "s336",
374
+ "s337",
375
+ "s338",
376
+ "s339",
377
+ "s340",
378
+ "s341",
379
+ "s342",
380
+ "s343",
381
+ "s344",
382
+ "s345",
383
+ "s346",
384
+ "s347",
385
+ "s348",
386
+ "s349",
387
+ "s350",
388
+ "s351",
389
+ "s352",
390
+ "s353",
391
+ "s354",
392
+ "s355",
393
+ "s356",
394
+ "s357",
395
+ "s358",
396
+ "s359",
397
+ "s360",
398
+ "s361",
399
+ "s362",
400
+ "s363",
401
+ "s364",
402
+ "s365",
403
+ "s366",
404
+ "s367",
405
+ "s368",
406
+ "s369",
407
+ "s370",
408
+ "s371",
409
+ "s372",
410
+ "s373",
411
+ "s374",
412
+ "s375",
413
+ "s376",
414
+ "s377",
415
+ "s378",
416
+ "s379",
417
+ "s380",
418
+ "s381",
419
+ "s382",
420
+ "s383",
421
+ "s384",
422
+ "s385",
423
+ "s386",
424
+ "s387",
425
+ "s388",
426
+ "s389",
427
+ "s390",
428
+ "s391",
429
+ "s392",
430
+ "s393",
431
+ "s394",
432
+ "s395",
433
+ "s396",
434
+ "s397",
435
+ "s398",
436
+ "s399",
437
+ "s400",
438
+ "s401",
439
+ "s402",
440
+ "s403",
441
+ "s404",
442
+ "s405",
443
+ "s406",
444
+ "s407",
445
+ "s408",
446
+ "s409",
447
+ "s410",
448
+ "s411",
449
+ "s412",
450
+ "s413",
451
+ "s414",
452
+ "s415",
453
+ "s416",
454
+ "s417",
455
+ "s418",
456
+ "s419",
457
+ "s420",
458
+ "s421",
459
+ "s422",
460
+ "s423",
461
+ "s424",
462
+ "s425",
463
+ "s426",
464
+ "s427",
465
+ "s428",
466
+ "s429",
467
+ "s430",
468
+ "s431",
469
+ "s432",
470
+ "s433",
471
+ "s434",
472
+ "s435",
473
+ "s436",
474
+ "s437",
475
+ "s438",
476
+ "s439",
477
+ "s440",
478
+ "s441",
479
+ "s442",
480
+ "s443",
481
+ "s444",
482
+ "s445",
483
+ "s446",
484
+ "s447",
485
+ "s448",
486
+ "s449",
487
+ "s450",
488
+ "s451",
489
+ "s452",
490
+ "s453",
491
+ "s454",
492
+ "s455",
493
+ "s456",
494
+ "s457",
495
+ "s458",
496
+ "s459",
497
+ "s460",
498
+ "s461",
499
+ "s462",
500
+ "s463",
501
+ "s464",
502
+ "s465",
503
+ "s466",
504
+ "s467",
505
+ "s468",
506
+ "s469",
507
+ "s470",
508
+ "s471",
509
+ "s472",
510
+ "s473",
511
+ "s474",
512
+ "s475",
513
+ "s476",
514
+ "s477",
515
+ "s478",
516
+ "s479",
517
+ "s480",
518
+ "s481",
519
+ "s482",
520
+ "s483",
521
+ "s484",
522
+ "s485",
523
+ "s486",
524
+ "s487",
525
+ "s488",
526
+ "s489",
527
+ "s490",
528
+ "s491",
529
+ "s492",
530
+ "s493",
531
+ "s494",
532
+ "s495",
533
+ "s496",
534
+ "s497",
535
+ "s498",
536
+ "s499",
537
+ "s500",
538
+ "s501",
539
+ "s502",
540
+ "s503",
541
+ "s504",
542
+ "s505",
543
+ "s506",
544
+ "s507",
545
+ "s508",
546
+ "s509",
547
+ "s510",
548
+ "s511",
549
+ "s512",
550
+ "s513",
551
+ "s514",
552
+ "s515",
553
+ "s516",
554
+ "s517",
555
+ "s518",
556
+ "s519",
557
+ "s520",
558
+ "s521",
559
+ "s522",
560
+ "s523",
561
+ "s524",
562
+ "s525",
563
+ "s526",
564
+ "s527",
565
+ "s528",
566
+ "s529",
567
+ "s530",
568
+ "s531",
569
+ "s532",
570
+ "s533",
571
+ "s534",
572
+ "s535",
573
+ "s536",
574
+ "s537",
575
+ "s538",
576
+ "s539",
577
+ "s540",
578
+ "s541",
579
+ "s542",
580
+ "s543",
581
+ "s544",
582
+ "s545",
583
+ "s546",
584
+ "s547",
585
+ "s548",
586
+ "s549",
587
+ "s550",
588
+ "s551",
589
+ "s552",
590
+ "s553",
591
+ "s554",
592
+ "s555",
593
+ "s556",
594
+ "s557",
595
+ "s558",
596
+ "s559",
597
+ "s560",
+   "s561",
+   "s562",
+   "s563",
+   "s564",
+   "s565",
+   "s566",
+   "s567",
+   "s568",
+   "s569",
+   "s570",
+   "s571",
+   "s572",
+   "s573",
+   "s574",
+   "s575",
+   "s576",
+   "s577",
+   "s578",
+   "s579",
+   "s580",
+   "s581",
+   "s582",
+   "s583",
+   "s584",
+   "s585",
+   "s586",
+   "s587",
+   "s588",
+   "s589",
+   "s590",
+   "s591",
+   "s592",
+   "s593",
+   "s594",
+   "s595",
+   "s596",
+   "s597",
+   "s598",
+   "s599",
+   "s600",
+   "s601",
+   "s602",
+   "s603",
+   "s604",
+   "s605",
+   "s606",
+   "s607",
+   "s608",
+   "s609",
+   "s610",
+   "s611",
+   "s612",
+   "s613",
+   "s614",
+   "s615",
+   "s616",
+   "s617",
+   "s618",
+   "s619",
+   "s620",
+   "s621",
+   "s622",
+   "s623",
+   "s624",
+   "s625",
+   "s626",
+   "s627",
+   "s628",
+   "s629",
+   "s630",
+   "s631",
+   "s632",
+   "s633",
+   "s634",
+   "s635",
+   "s636",
+   "s637",
+   "s638",
+   "s639",
+   "s640",
+   "s641",
+   "s642",
+   "s643",
+   "s644",
+   "s645",
+   "s646",
+   "s647",
+   "s648",
+   "s649",
+   "s650",
+   "s651",
+   "s652",
+   "s653",
+   "s654",
+   "s655",
+   "s656",
+   "s657",
+   "s658",
+   "s659",
+   "s660",
+   "s661",
+   "s662",
+   "s663",
+   "s664",
+   "s665",
+   "s666",
+   "s667",
+   "s668",
+   "s669",
+   "s670",
+   "s671",
+   "s672",
+   "s673",
+   "s674",
+   "s675",
+   "s676",
+   "s677",
+   "s678",
+   "s679",
+   "s680",
+   "s681",
+   "s682",
+   "s683",
+   "s684",
+   "s685",
+   "s686",
+   "s687",
+   "s688",
+   "s689",
+   "s690",
+   "s691",
+   "s692",
+   "s693",
+   "s694",
+   "s695",
+   "s696",
+   "s697",
+   "s698",
+   "s699",
+   "s700",
+   "s701",
+   "s702",
+   "s703",
+   "s704",
+   "s705",
+   "s706",
+   "s707",
+   "s708",
+   "s709",
+   "s710",
+   "s711",
+   "s712",
+   "s713",
+   "s714",
+   "s715",
+   "s716",
+   "s717",
+   "s718",
+   "s719",
+   "s720",
+   "s721",
+   "s722",
+   "s723",
+   "s724",
+   "s725",
+   "s726",
+   "s727",
+   "s728",
+   "s729",
+   "s730",
+   "s731",
+   "s732",
+   "s733",
+   "s734",
+   "s735",
+   "s736",
+   "s737",
+   "s738",
+   "s739",
+   "s740",
+   "s741",
+   "s742",
+   "s743",
+   "s744",
+   "s745",
+   "s746",
+   "s747",
+   "s748",
+   "s749",
+   "s750",
+   "s751",
+   "s752",
+   "s753",
+   "s754",
+   "s755",
+   "s756",
+   "s757",
+   "s758",
+   "s759",
+   "s760",
+   "s761",
+   "s762",
+   "s763",
+   "s764",
+   "s765",
+   "s766",
+   "s767",
+   "s768",
+   "s769",
+   "s770",
+   "s771",
+   "s772",
+   "s773",
+   "s774",
+   "s775",
+   "s776",
+   "s777",
+   "s778",
+   "s779",
+   "s780",
+   "s781",
+   "s782",
+   "s783",
+   "s784",
+   "s785",
+   "s786",
+   "s787",
+   "s788",
+   "s789",
+   "s790",
+   "s791",
+   "s792",
+   "s793",
+   "s794",
+   "s795",
+   "s796",
+   "s797",
+   "s798",
+   "s799",
+   "s800",
+   "s801",
+   "s802",
+   "s803",
+   "s804",
+   "s805",
+   "s806",
+   "s807",
+   "s808",
+   "s809",
+   "s810",
+   "s811",
+   "s812",
+   "s813",
+   "s814",
+   "s815",
+   "s816",
+   "s817",
+   "s818",
+   "s819",
+   "s820",
+   "s821",
+   "s822",
+   "s823",
+   "s824",
+   "s825",
+   "s826",
+   "s827",
+   "s828",
+   "s829",
+   "s830",
+   "s831",
+   "s832",
+   "s833",
+   "s834",
+   "s835",
+   "s836",
+   "s837",
+   "s838",
+   "s839",
+   "s840",
+   "s841",
+   "s842",
+   "s843",
+   "s844",
+   "s845",
+   "s846",
+   "s847",
+   "s848",
+   "s849",
+   "s850",
+   "s851",
+   "s852",
+   "s853",
+   "s854",
+   "s855",
+   "s856",
+   "s857",
+   "s858",
+   "s859",
+   "s860",
+   "s861",
+   "s862",
+   "s863",
+   "s864",
+   "s865",
+   "s866",
+   "s867",
+   "s868",
+   "s869",
+   "s870",
+   "s871",
+   "s872",
+   "s873",
+   "s874",
+   "s875",
+   "s876",
+   "s877",
+   "s878",
+   "s879",
+   "s880",
+   "s881",
+   "s882",
+   "s883",
+   "s884",
+   "s885",
+   "s886",
+   "s887",
+   "s888",
+   "s889",
+   "s890",
+   "s891",
+   "s892",
+   "s893",
+   "s894",
+   "s895",
+   "s896",
+   "s897",
+   "s898",
+   "s899",
+   "s900",
+   "s901",
+   "s902",
+   "s903",
+   "s904",
+   "s905",
+   "s906",
+   "s907",
+   "s908",
+   "s909",
+   "s910",
+   "s911",
+   "s912",
+   "s913",
+   "s914",
+   "s915",
+   "s916",
+   "s917",
+   "s918",
+   "s919",
+   "s920",
+   "s921",
+   "s922",
+   "s923",
+   "s924",
+   "s925",
+   "s926",
+   "s927",
+   "s928",
+   "s929",
+   "s930",
+   "s931",
+   "s932",
+   "s933",
+   "s934",
+   "s935",
+   "s936",
+   "s937",
+   "s938",
+   "s939",
+   "s940",
+   "s941",
+   "s942",
+   "s943",
+   "s944",
+   "s945",
+   "s946",
+   "s947",
+   "s948",
+   "s949",
+   "s950",
+   "s951",
+   "s952",
+   "s953",
+   "s954",
+   "s955",
+   "s956",
+   "s957",
+   "s958",
+   "s959",
+   "s960",
+   "s961",
+   "s962",
+   "s963",
+   "s964",
+   "s965",
+   "s966",
+   "s967",
+   "s968",
+   "s969",
+   "s970",
+   "s971",
+   "s972",
+   "s973",
+   "s974",
+   "s975",
+   "s976",
+   "s977",
+   "s978",
+   "s979",
+   "s980",
+   "s981",
+   "s982",
+   "s983",
+   "s984",
+   "s985",
+   "s986",
+   "s987",
+   "s988",
+   "s989",
+   "s990",
+   "s991",
+   "s992",
+   "s993",
+   "s994",
+   "s995",
+   "s996",
+   "s997",
+   "s998",
+   "s999",
+   "<eop>",
+   "<eog>",
+   "<|begin_of_sentence|>",
+   "<|end_of_sentence|>",
+   "<|User|>",
+   "<|Assistant|>",
+   "<think>",
+   "</think>",
+   "<search_result>",
+   "</search_result>",
+   "<search_query>",
+   "</search_query>",
+   "<code_query>",
+   "</code_query>",
+   "<code_result>",
+   "</code_result>",
+   "<infer>",
+   "</infer>",
+   "<inferresult>",
+   "</inferresult>",
+   "<tool_calls>",
+   "</tool_calls>",
+   "<tool_response>",
+   "</tool_response>",
+   "<final_answer>",
+   "</final_answer>"
+ ],
+ "bos_token": {
+   "content": "<s>",
+   "lstrip": false,
+   "normalized": false,
+   "rstrip": false,
+   "single_word": false
+ },
+ "eos_token": {
+   "content": "<eod>",
+   "lstrip": false,
+   "normalized": false,
+   "rstrip": false,
+   "single_word": false
+ },
+ "pad_token": {
+   "content": "<eod>",
+   "lstrip": false,
+   "normalized": false,
+   "rstrip": false,
+   "single_word": false
+ },
+ "unk_token": {
+   "content": "<unk>",
+   "lstrip": false,
+   "normalized": false,
+   "rstrip": false,
+   "single_word": false
+ }
+ }
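
As a quick sanity check of the special-token map above, the sketch below loads the tokenizer and prints the registered control tokens. It is only a minimal sketch: the model path is a placeholder for a local download, and it assumes the tokenizer files from this commit are present there.

```python
from transformers import AutoTokenizer

# Placeholder path; point this at your local copy of the model files.
MODEL_PATH = "/path/to/Yuan3.0-Model"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_PATH,
    local_files_only=True,
    trust_remote_code=True,
)

# Per the map above: bos is "<s>", while eos and pad share "<eod>".
print("bos:", tokenizer.bos_token)
print("eos:", tokenizer.eos_token)
print("pad:", tokenizer.pad_token)

# Control tags such as <think> or <tool_calls> should each map to one id.
for tok in ("<think>", "</think>", "<tool_calls>", "</tool_calls>"):
    print(tok, "->", tokenizer.convert_tokens_to_ids(tok))
```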
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c84861a800c30e71099d63dca0963edbacf554586527ac037155a0560e2fb04
+ size 14976038
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36f79e0c70f73cdd2a8dd0fbe7bfe290da158eea746778d289e4ad76c8b383d9
+ size 2155861
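
`tokenizer.json` and `tokenizer.model` are tracked through Git LFS, so the repository itself stores only the three-line pointers shown above (spec version, SHA-256 object id, and byte size); the actual payload is fetched at checkout. As an illustrative sketch (the helper names below are invented; only the pointer format comes from the files above), this snippet parses such a pointer and verifies a downloaded blob against it:

```python
import hashlib
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into {"version", "oid", "size"}."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["oid"] = fields["oid"].removeprefix("sha256:")
    fields["size"] = int(fields["size"])
    return fields

def verify_blob(blob_path: str, pointer: dict) -> bool:
    """Check a fetched file against the pointer's byte size and sha256 oid."""
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])
```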
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
utils.py ADDED
@@ -0,0 +1,144 @@
+ from typing import List, Literal, Union, overload
+
+ import torch
+
+ # Possibly nested containers of tensors (the vision path can hand back a
+ # list of per-image tensors); used by the embedding helpers below.
+ NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor]
+
+
+ @overload
+ def flatten_bn(x: torch.Tensor) -> torch.Tensor:
+     ...
+
+
+ @overload
+ def flatten_bn(x: List[torch.Tensor]) -> List[torch.Tensor]:
+     ...
+
+
+ @overload
+ def flatten_bn(
+     x: Union[List[torch.Tensor], torch.Tensor],
+     *,
+     concat: Literal[True],
+ ) -> torch.Tensor:
+     ...
+
+
+ @overload
+ def flatten_bn(
+     x: Union[List[torch.Tensor], torch.Tensor],
+     *,
+     concat: bool = False,
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
+     ...
+
+
+ def flatten_bn(
+     x: Union[List[torch.Tensor], torch.Tensor],
+     *,
+     concat: bool = False,
+ ) -> Union[List[torch.Tensor], torch.Tensor]:
+     """
+     Flatten the ``B`` and ``N`` dimensions of batched multimodal inputs.
+
+     The input tensor should have shape ``(B, N, ...)``.
+     """
+     if isinstance(x, torch.Tensor):
+         return x.flatten(0, 1)
+
+     if concat:
+         return torch.cat(x)
+
+     return [x_n for x_b in x for x_n in x_b]
+
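
Before the embedding helpers, a minimal usage sketch of `flatten_bn` with its two accepted input forms; the shapes are invented for illustration, and the import assumes this file is importable as `utils`:

```python
import torch
from utils import flatten_bn  # assuming this file is on the Python path

# A dense batch: B=2 samples with N=3 items of feature dim 4 each.
batched = torch.randn(2, 3, 4)
assert flatten_bn(batched).shape == (6, 4)  # (B*N, ...)

# A ragged batch arrives as a list of (N_i, ...) tensors.
ragged = [torch.randn(3, 4), torch.randn(5, 4)]
assert flatten_bn(ragged, concat=True).shape == (8, 4)  # one tensor
assert len(flatten_bn(ragged)) == 8                     # list of rows
```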
+ def _flatten_embeddings(embeddings: NestedTensors) -> torch.Tensor:
+     """
+     Recursively flattens and concatenates NestedTensors on all but the last
+     dimension.
+     """
+     if isinstance(embeddings, torch.Tensor):
+         # Flatten all but the last dimension.
+         return embeddings.flatten(0, -2)
+
+     return torch.cat(tuple(_flatten_embeddings(t) for t in embeddings))
+
+
+ def _embedding_count_expression(embeddings: NestedTensors) -> str:
+     """
+     Constructs a debugging representation of the number of embeddings in the
+     tensors.
+     """
+     if isinstance(embeddings, torch.Tensor):
+         return " x ".join(str(dim) for dim in embeddings.shape[:-1])
+
+     return " + ".join(
+         _embedding_count_expression(inner) for inner in embeddings)
+
+
+ def _merge_multimodal_embeddings(
+     inputs_embeds: torch.Tensor,
+     is_multimodal: torch.Tensor,
+     multimodal_embeddings: NestedTensors,
+ ) -> torch.Tensor:
+     """
+     Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
+     positions where the boolean mask ``is_multimodal`` is true, i.e. the
+     placeholder-token positions.
+
+     Note:
+         This updates ``inputs_embeds`` in place.
+     """
+     num_expected_tokens = is_multimodal.sum().item()
+     assert isinstance(num_expected_tokens, int)
+
+     # [total_patches, text_config.hidden_size]
+     flattened = _flatten_embeddings(multimodal_embeddings)
+     if flattened.shape[0] != num_expected_tokens:
+         expr = _embedding_count_expression(multimodal_embeddings)
+         raise ValueError(
+             f"Attempted to assign {expr} = {flattened.shape[0]} "
+             f"multimodal tokens to {num_expected_tokens} placeholders")
+
+     inputs_embeds[is_multimodal] = flattened
+     return inputs_embeds
+
+
+ def merge_multimodal_embeddings(
+     input_ids: torch.Tensor,
+     inputs_embeds: torch.Tensor,
+     multimodal_embeddings: NestedTensors,
+     placeholder_token_id: Union[int, List[int]],
+ ) -> torch.Tensor:
+     """
+     Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the
+     positions in ``inputs_embeds`` corresponding to placeholder tokens in
+     ``input_ids``.
+
+     ``placeholder_token_id`` can be a list of token ids (e.g., the token ids
+     of img_start, img_break, and img_end tokens) when needed: this means
+     the order of these tokens in ``input_ids`` MUST MATCH the order of
+     their embeddings in ``multimodal_embeddings``, since we need to
+     slice-merge instead of individually scattering.
+
+     For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where
+     - T is a text token
+     - S is the image start token
+     - I is an image embedding token
+     - B is the image break token
+     - E is the image end token,
+
+     then the image embeddings (those corresponding to the I's) from the
+     vision encoder must be padded with embeddings of S, B, and E in the same
+     order as ``input_ids`` for a correct embedding merge.
+
+     Note:
+         This updates ``inputs_embeds`` in place.
+     """
+     if isinstance(placeholder_token_id, list):
+         placeholder_token_id = torch.tensor(placeholder_token_id,
+                                             device=input_ids.device)
+         return _merge_multimodal_embeddings(
+             inputs_embeds,
+             torch.isin(input_ids, placeholder_token_id),
+             multimodal_embeddings,
+         )
+
+     return _merge_multimodal_embeddings(
+         inputs_embeds,
+         (input_ids == placeholder_token_id),
+         multimodal_embeddings,
+     )
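
Closing with a small end-to-end sketch of the placeholder merge: the token ids, dimensions, and placeholder id below are invented for illustration, and only the function behavior comes from the listing above.

```python
import torch
from utils import merge_multimodal_embeddings  # assuming utils.py is importable

IMG = 9  # hypothetical placeholder token id
input_ids = torch.tensor([1, 2, IMG, IMG, 3])

hidden = 4
inputs_embeds = torch.zeros(5, hidden)  # stand-in for text embeddings
image_embeds = torch.ones(2, hidden)    # one row per IMG placeholder

merged = merge_multimodal_embeddings(input_ids, inputs_embeds,
                                     image_embeds, IMG)
assert torch.equal(merged[2], torch.ones(hidden))   # placeholder overwritten
assert torch.equal(merged[0], torch.zeros(hidden))  # text rows untouched

# Supplying a mismatched count (e.g. 3 rows for 2 placeholders) raises
# the ValueError built from _embedding_count_expression.
```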