JPShi committed on
Commit
6b89cc6
·
verified ·
1 Parent(s): 247f1fd

Add files using upload-large-folder tool

Browse files
configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ InternLM2 model configuration"""
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import logging
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
24
+
25
+
26
# Modified from transformers.model.llama.configuration_llama.LlamaConfig
class InternLM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 103168):
            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`InternLM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        bias (`bool`, *optional*, defaults to `True`):
            Whether linear layers include a bias term (stored for use by the modeling code).
        rope_theta (`float`, *optional*, defaults to 10000):
            The base period of the rotary position embeddings.
        rope_scaling (`dict`, *optional*):
            Dictionary with exactly two fields, `type` (one of `"linear"` or `"dynamic"`) and `factor`
            (a `float` >= 1.0), configuring RoPE scaling. `None` disables scaling.
        attn_implementation (`str`, *optional*, defaults to `"eager"`):
            The attention implementation to use; `None` is treated as `"eager"`.
    Example:

    """
    model_type = 'internlm2'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.bias = bias

        # GQA: an unspecified KV-head count falls back to plain multi-head attention.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # Fail fast on a malformed rope_scaling dict rather than at model build time.
        self._rope_scaling_validation()

        # `None` means "use the default implementation".
        self.attn_implementation = attn_implementation
        if self.attn_implementation is None:
            self.attn_implementation = 'eager'
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.

        Raises:
            ValueError: if `rope_scaling` is not `None` and is not a dict of the form
                `{'type': 'linear'|'dynamic', 'factor': float >= 1.0}`.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_intern_vit.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ from typing import Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+ from einops import rearrange
13
+ from timm.models.layers import DropPath
14
+ from torch import nn
15
+ from transformers.activations import ACT2FN
16
+ from transformers.modeling_outputs import (BaseModelOutput,
17
+ BaseModelOutputWithPooling)
18
+ from transformers.modeling_utils import PreTrainedModel
19
+ from transformers.utils import logging
20
+
21
+ from .configuration_intern_vit import InternVisionConfig
22
+
23
+ try:
24
+ from .flash_attention import FlashAttention
25
+ has_flash_attn = True
26
+ except:
27
+ print('FlashAttention is not installed.')
28
+ has_flash_attn = False
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
class InternRMSNorm(nn.Module):
    """RMS normalization: scale by the inverse root-mean-square of the last dim.

    The statistics are computed in float32 for numerical stability and the
    result is cast back to the input dtype before applying the learned gain.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learned per-channel gain, initialized to identity.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        original_dtype = hidden_states.dtype
        # Upcast so the mean-of-squares stays accurate for fp16/bf16 inputs.
        upcast = hidden_states.to(torch.float32)
        mean_square = upcast.pow(2).mean(-1, keepdim=True)
        normalized = upcast * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)
45
+
46
+
47
# If apex is installed, transparently replace the pure-PyTorch InternRMSNorm
# defined above with apex's fused CUDA kernel of the same interface.
try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    # apex is present but failed to load (e.g. built against a different
    # CUDA/PyTorch); silently keep the pure-PyTorch implementation.
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


# Maps the config's `norm_type` string to the normalization layer class.
NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}
65
+
66
+
67
class InternVisionEmbeddings(nn.Module):
    """Patch + position embeddings for the InternViT image encoder.

    Converts an image into a sequence of patch embeddings, prepends a learned
    class token, and adds learned position embeddings (bicubically resized to
    the actual input resolution).
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Learned [CLS] token prepended to the patch sequence.
        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        # Non-overlapping patchify: stride == kernel == patch_size.
        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1  # +1 for the [CLS] slot

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        """Bicubically resize the (non-CLS) position grid to an H x W patch grid."""
        target_dtype = pos_embed.dtype
        # (1, N, C) -> (1, C, side, side) so F.interpolate can resize spatially.
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """Embed (B, 3, H, W) pixels into a (B, 1 + num_patches, embed_dim) sequence."""
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        # The [CLS] position embedding is kept as-is; only the spatial grid is
        # resized, so inputs of other (even non-square) resolutions work.
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings
109
+
110
+
111
class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        # Flash attention is only used when requested AND the import succeeded.
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        # Fused projection producing Q, K and V in a single matmul.
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        # Optional RMS-norm applied to queries/keys over the full embed dim.
        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        # Standard softmax attention in pure PyTorch; x is (B, N, C).
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            # Flatten heads so the norm sees the full embed dim, then restore layout.
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        # Attention via the FlashAttention kernel; x is (B, N, C).
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        # Vision attention is bidirectional, hence causal=False.
        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x
183
+
184
+
185
class InternMLP(nn.Module):
    """Two-layer feed-forward block: fc1 -> activation -> fc2."""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # Activation looked up by name from the transformers registry.
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand to the intermediate size, apply the non-linearity, project back.
        return self.fc2(self.act(self.fc1(hidden_states)))
198
+
199
+
200
class InternVisionEncoderLayer(nn.Module):
    """Pre-norm transformer block: attention and MLP residual branches, each
    scaled by a learned per-channel gain (ls1/ls2) and wrapped in DropPath."""

    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        # Learned per-channel residual-branch gains, initialized to initializer_factor.
        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        # Stochastic depth on each residual branch; identity when the rate is 0.
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        # Norm is applied BEFORE each sub-module (pre-norm residual layout).
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states
230
+
231
+
232
class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: drop-path rate grows linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        # NOTE(review): gradient checkpointing is enabled unconditionally here
        # (most HF models default it to False) — confirm this is intended.
        self.gradient_checkpointing = True

    def forward(
        self,
        inputs_embeds,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Hidden state BEFORE each layer (input embeddings first).
                encoder_states = encoder_states + (hidden_states,)
            # Trade compute for memory in training: re-run the layer in the
            # backward pass instead of storing its activations.
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            # Final hidden state closes the per-layer trace.
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )
296
+
297
+
298
class InternVisionModel(PreTrainedModel):
    """HF wrapper around the InternViT encoder (patch embeddings + transformer stack)."""

    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        """Bicubically resize the learned position grid from `old_size` to `new_size` (image pixels)."""
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        # The [CLS] slot is kept unchanged; only the spatial grid is resized.
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Encode images (or precomputed patch embeddings) into hidden states.

        At least one of `pixel_values` (B, 3, H, W) or `pixel_embeds` must be
        provided; `pixel_embeds` takes precedence when both are given.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Fix: when return_dict=False the encoder returns a plain tuple, which has
        # no `.last_hidden_state` attribute — index position 0 works for both
        # tuples and ModelOutput instances.
        last_hidden_state = encoder_outputs[0]
        # Pool by taking the [CLS] token (sequence position 0).
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        # NOTE(review): the encoder never produces attentions, so `attentions`
        # below is always None — confirm whether attention output is needed.
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
modeling_internlm2.py ADDED
@@ -0,0 +1,1429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ try:
39
+ from transformers.generation.streamers import BaseStreamer
40
+ except: # noqa # pylint: disable=bare-except
41
+ BaseStreamer = None
42
+
43
+ from .configuration_internlm2 import InternLM2Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = 'InternLM2Config'
48
+
49
# Module-level handles to the optional flash-attn kernels; they stay None
# until the import below (or a later `_import_flash_attn()` call) succeeds.
flash_attn_func, flash_attn_varlen_func = None, None
pad_input, index_first_axis, unpad_input = None, None, None
try:
    from flash_attn import flash_attn_func as _flash_attn_func
    from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis as _index_first_axis
    from flash_attn.bert_padding import pad_input as _pad_input
    from flash_attn.bert_padding import unpad_input as _unpad_input

    flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
    pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
    has_flash_attn = True
except:  # noqa: bare except — flash-attn is optional; any failure falls back to eager attention
    has_flash_attn = False
63
+
64
+
65
+ def _import_flash_attn():
66
+ global flash_attn_func, flash_attn_varlen_func
67
+ global pad_input, index_first_axis, unpad_input
68
+ try:
69
+ from flash_attn import flash_attn_func as _flash_attn_func
70
+ from flash_attn import \
71
+ flash_attn_varlen_func as _flash_attn_varlen_func
72
+ from flash_attn.bert_padding import \
73
+ index_first_axis as _index_first_axis
74
+ from flash_attn.bert_padding import pad_input as _pad_input
75
+ from flash_attn.bert_padding import unpad_input as _unpad_input
76
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
77
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
78
+ except ImportError:
79
+ raise ImportError('flash_attn is not installed.')
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
83
+ def _get_unpad_data(attention_mask):
84
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
85
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
86
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
87
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
88
+ return (
89
+ indices,
90
+ cu_seqlens,
91
+ max_seqlen_in_batch,
92
+ )
93
+
94
+
95
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
96
+ def _make_causal_mask(
97
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
98
+ ):
99
+ """
100
+ Make causal mask used for bi-directional self-attention.
101
+ """
102
+ bsz, tgt_len = input_ids_shape
103
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
104
+ mask_cond = torch.arange(mask.size(-1), device=device)
105
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
106
+ mask = mask.to(dtype)
107
+
108
+ if past_key_values_length > 0:
109
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
110
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
111
+
112
+
113
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
114
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
115
+ """
116
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
117
+ """
118
+ bsz, src_len = mask.size()
119
+ tgt_len = tgt_len if tgt_len is not None else src_len
120
+
121
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
122
+
123
+ inverted_mask = 1.0 - expanded_mask
124
+
125
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
126
+
127
+
128
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
class InternLM2RMSNorm(nn.Module):
    """Root-mean-square layer norm (equivalent to T5LayerNorm).

    Divides by the RMS of the last dimension (computed in float32) and applies
    a learned per-channel gain; no mean-centering and no bias.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Gain initialized to 1 so the layer starts as a pure normalization.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        original_dtype = hidden_states.dtype
        # Compute the statistics in float32 for fp16/bf16 stability.
        upcast = hidden_states.to(torch.float32)
        mean_square = upcast.pow(2).mean(-1, keepdim=True)
        normalized = upcast * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(original_dtype)
144
+
145
+
146
# Prefer apex's fused RMSNorm kernel when available. Note this rebinds the
# InternLM2RMSNorm name defined above and pins eps=1e-6 via functools.partial,
# so any per-instance eps argument passed by callers is ignored here.
try:
    from functools import partial

    from apex.normalization import FusedRMSNorm
    InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6)  # noqa
    print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm')
except ImportError:
    # using the normal LlamaRMSNorm
    pass
except Exception:
    # apex is present but failed to load (e.g. ABI/CUDA mismatch);
    # keep the pure-PyTorch implementation.
    print('discovered apex but it failed to load, falling back to InternLM2RMSNorm')
    pass
158
+
159
+
160
# Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
class InternLM2RotaryEmbedding(nn.Module):
    """Precomputes and caches the cos/sin tables used for rotary position embeddings."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        # Per-pair inverse frequencies: base^(-2i/dim) for i in [0, dim/2).
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer('inv_freq', inv_freq, persistent=False)

        # Populate the cache up-front so `torch.jit.trace` sees the buffers.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        # Outer product position x frequency gives the rotation angles.
        angles = torch.outer(positions, self.inv_freq)
        # Duplicate rather than interleave the angles: a different permutation
        # than the paper, but it produces the same result with `rotate_half`.
        table = torch.cat((angles, angles), dim=-1)
        self.register_buffer('cos_cached', table.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', table.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]; grow the cache on demand.
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )
195
+
196
+
197
# Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        # Must be assigned before super().__init__, which builds the cache.
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        # Identical to the parent's cache build, except positions are divided
        # by `scaling_factor` to stretch the usable context window.
        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
        positions = positions / self.scaling_factor

        angles = torch.outer(positions, self.inv_freq)
        # Duplicate the angle table; matches the `rotate_half` layout.
        table = torch.cat((angles, angles), dim=-1)
        self.register_buffer('cos_cached', table.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', table.sin().to(dtype), persistent=False)
215
+
216
+
217
# Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
    Credits to the Reddit users /u/bloc97 and /u/emozilla.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        # Stored before the parent constructor runs, since it triggers a cache build.
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            # NTK-aware rescaling: enlarge the rotary base as the sequence grows
            # past the trained context length, then rebuild the inverse frequencies.
            adjusted_base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (adjusted_base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer('inv_freq', inv_freq, persistent=False)

        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        angles = torch.outer(positions, self.inv_freq)
        # Duplicate the angle table; matches the `rotate_half` layout.
        table = torch.cat((angles, angles), dim=-1)
        self.register_buffer('cos_cached', table.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', table.sin().to(dtype), persistent=False)
244
+
245
+
246
# Copied from transformers.model.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input: (a, b) -> (-b, a) per half."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
252
+
253
+
254
# Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors."""
    # Gather per-position tables, then insert a broadcast dim (the heads axis by default).
    cos_sel = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin_sel = sin[position_ids].unsqueeze(unsqueeze_dim)
    rotated_q = q * cos_sel + rotate_half(q) * sin_sel
    rotated_k = k * cos_sel + rotate_half(k) * sin_sel
    return rotated_q, rotated_k
262
+
263
+
264
class InternLM2MLP(nn.Module):
    """Gated feed-forward block: w2(act(w1(x)) * w3(x)), all projections bias-free."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Gate (w1), up (w3) and down (w2) projections, matching checkpoint names.
        self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        gated = self.act_fn(self.w1(x)) * self.w3(x)
        return self.w2(gated)
279
+
280
+
281
# Copied from transformers.model.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    if n_rep == 1:
        # Nothing to repeat; return the input unchanged.
        return hidden_states
    batch, kv_heads, seq_len, head_dim = hidden_states.shape
    # expand() creates a view (no copy); the reshape materialises the repeats.
    expanded = hidden_states[:, :, None, :, :].expand(batch, kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, kv_heads * n_rep, seq_len, head_dim)
292
+
293
+
294
# Modified from transformers.model.llama.modeling_llama.LlamaAttention
class InternLM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Uses one fused `wqkv` projection that emits queries, keys and values together.
    Keys/values may use fewer heads than queries (grouped-query attention) when
    `num_key_value_heads < num_attention_heads`.
    """

    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        # Number of query heads that share one key/value head.
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        # Fused projection: num_heads query slices plus one key and one value
        # slice per kv head, hence (num_heads + 2 * num_key_value_heads) * head_dim.
        self.wqkv = nn.Linear(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
        )

        self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self._init_rope()

    def _init_rope(self):
        # Select the rotary-embedding variant from `config.rope_scaling`
        # (None -> plain RoPE, else 'dynamic' or 'linear' scaling).
        if self.config.rope_scaling is None:
            self.rotary_emb = InternLM2RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            scaling_factor = self.config.rope_scaling['factor']
            if scaling_type == 'dynamic':
                self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            elif scaling_type == 'linear':
                self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            else:
                raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
        return self.rotary_emb

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # Reshape a flat projection to (bsz, num_heads, seq_len, head_dim).
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Eager attention forward.

        Returns (attn_output, attn_weights or None, past_key_value or None).
        `attention_mask`, when given, is an additive mask of shape
        (bsz, 1, q_len, kv_seq_len) — enforced by the size check below.
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        # Unpack the fused projection: each kv head owns `num_key_value_groups`
        # query slices followed by one key and one value slice (gs = groups + 2).
        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        # (b, seq, heads, dim) -> (b, heads, seq, dim)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            # Include cached positions so the rotary tables cover the full length.
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # Expand kv heads to the query head count before the score matmul.
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            # Additive mask: masked positions carry large negative values.
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        # (b, heads, q, d) -> (b, q, heads*d) for the output projection.
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
441
+
442
+
443
# Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
class InternLM2FlashAttention2(InternLM2Attention):
    """
    InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # InternLM2FlashAttention2 attention does not support output_attentions
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        # attn_weights are never materialised by flash attention.
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        # Unpack the fused QKV projection; see InternLM2Attention.forward for the layout.
        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # flash-attn expects (batch, seq, heads, head_dim); undo the earlier transpose.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len
        )
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        # Contains at least one padding token in the sequence
        # Single-token decode (query_length == 1) needs no causal masking.
        causal = self.is_causal and query_length != 1
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            # Pack variable-length sequences into flat tensors for the varlen kernel.
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            # Scatter the packed rows back to the padded (batch, seq) layout.
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        # Build cumulative-length metadata and gather only the non-padded rows.
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )

        if query_length == kv_seq_len:
            # Prefill without cache: queries share the key unpadding metadata.
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-step decode: exactly one query per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q.to(torch.int64),
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
615
+
616
+
617
# Dispatch table: config.attn_implementation -> attention module class.
INTERNLM2_ATTENTION_CLASSES = {
    'eager': InternLM2Attention,
    'flash_attention_2': InternLM2FlashAttention2,
}
621
+
622
+
623
# Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
class InternLM2DecoderLayer(nn.Module):
    """One pre-norm transformer block: norm -> attention -> residual, then norm -> feed-forward -> residual."""

    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.hidden_size = config.hidden_size

        # Attention backend selected by config.attn_implementation ('eager' or 'flash_attention_2').
        self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)

        self.feed_forward = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

        residual = hidden_states

        hidden_states = self.attention_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.ffn_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states

        # Output tuple grows with the optional attention weights and kv cache.
        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
696
+
697
+
698
# Shared docstring header injected by `add_start_docstrings` onto the model classes below.
InternLM2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`InternLM2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
713
+
714
+
715
# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2PreTrainedModel(PreTrainedModel):
    """Base class wiring InternLM2 models into the transformers save/load machinery."""

    config_class = InternLM2Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    # Keep each decoder layer whole on one device when sharding with accelerate.
    _no_split_modules = ['InternLM2DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        """Initialise Linear/Embedding weights from N(0, initializer_range); zero biases."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding token's embedding starts at zero.
                module.weight.data[module.padding_idx].zero_()
738
+
739
+
740
# Shared forward-signature docstring injected via `add_start_docstrings_to_model_forward`.
InternLM2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
803
+
804
+
805
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
806
+ @add_start_docstrings(
807
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
808
+ InternLM2_START_DOCSTRING,
809
+ )
810
+ class InternLM2Model(InternLM2PreTrainedModel):
811
+ """
812
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
813
+
814
+ Args:
815
+ config: InternLM2Config
816
+ """
817
+
818
+ _auto_class = 'AutoModel'
819
+
820
+ def __init__(self, config: InternLM2Config):
821
+ super().__init__(config)
822
+ self.padding_idx = config.pad_token_id
823
+ self.vocab_size = config.vocab_size
824
+ self.config = config
825
+ if not has_flash_attn:
826
+ self.config.attn_implementation = 'eager'
827
+ print('Warning: Flash attention is not available, using eager attention instead.')
828
+
829
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
830
+
831
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
832
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
833
+
834
+ self.gradient_checkpointing = False
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ def get_input_embeddings(self):
839
+ return self.tok_embeddings
840
+
841
+ def set_input_embeddings(self, value):
842
+ self.tok_embeddings = value
843
+
844
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
845
+ # create causal mask
846
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
847
+ combined_attention_mask = None
848
+ if input_shape[-1] > 1:
849
+ combined_attention_mask = _make_causal_mask(
850
+ input_shape,
851
+ inputs_embeds.dtype,
852
+ device=inputs_embeds.device,
853
+ past_key_values_length=past_key_values_length,
854
+ )
855
+
856
+ if attention_mask is not None:
857
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
858
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
859
+ inputs_embeds.device
860
+ )
861
+ combined_attention_mask = (
862
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
863
+ )
864
+
865
+ return combined_attention_mask
866
+
867
    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the embedding layer and decoder stack.

        Returns the final hidden states, plus (optionally) the updated KV
        cache, per-layer hidden states, and per-layer attention weights.
        """
        # Fall back to the config defaults for any unset flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.attn_implementation == 'flash_attention_2':
            _import_flash_attn()

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        # Total key/value length = new tokens + cached tokens.
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            # Cache entries are (key, value) with shape [bsz, heads, kv_len, head_dim].
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            # Positions continue from the end of the cache.
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)

        if self.config.attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            # (dropped entirely when there is no padding — flash-attn is causal by itself).
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(
                    (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
                )
            # Expand 2D padding mask + causal mask to the 4D additive form.
            attention_mask = self._prepare_decoder_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                # Checkpointed re-forward would invalidate the returned cache.
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Hidden states are recorded *before* each layer; the final
                # (post-norm) state is appended after the loop.
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    # Wrapper so checkpoint() can pass only tensor args;
                    # output_attentions is closed over, past_key_value is None.
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                # The cache tuple sits after the (optional) attention output.
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
1000
+
1001
# Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
class InternLM2ForCausalLM(InternLM2PreTrainedModel):
    """InternLM2 decoder with a linear LM head, plus `chat`/`stream_chat` helpers."""

    # Name used by `save_pretrained` / auto-class registration.
    _auto_class = 'AutoModelForCausalLM'

    # The LM head weight may be tied to the input embedding matrix.
    _tied_weights_keys = ['output.weight']

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM2Model(config)
        self.vocab_size = config.vocab_size
        # LM head: hidden states -> vocabulary logits (no bias).
        self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLM2ForCausalLM

        >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.output(hidden_states)
        # Loss/sampling in float32 for numerical stability.
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        # Move logits back to the input device (relevant under model parallelism).
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        output = CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
        output['logits'] = output['logits'].to(device)
        return output

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Trim inputs to the uncached suffix and assemble the kwargs for `generate`."""
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get('position_ids', None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}

        model_inputs.update(
            {
                'position_ids': position_ids,
                'past_key_values': past_key_values,
                'use_cache': kwargs.get('use_cache'),
                'attention_mask': attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder every layer's KV cache along the batch dim for beam search."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
        """Render (history, query) as a ChatML-style prompt and tokenize it.

        NOTE(review): the mutable default `history=[]` is never mutated here,
        but callers should still prefer passing an explicit list.
        """
        # If the tokenizer prepends BOS itself, don't repeat it in the text.
        if tokenizer.add_bos_token:
            prompt = ''
        else:
            prompt = tokenizer.bos_token
        if meta_instruction:
            prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
        for record in history:
            prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
        prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
        return tokenizer([prompt], return_tensors='pt')

    @torch.no_grad()
    def chat(
        self,
        tokenizer,
        query: str,
        history: List[Tuple[str, str]] = [],
        streamer: Optional[BaseStreamer] = None,
        max_new_tokens: int = 1024,
        do_sample: bool = True,
        temperature: float = 0.8,
        top_p: float = 0.8,
        meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
        '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
        '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
        **kwargs,
    ):
        """Single-turn chat: build prompt, generate, decode, and append to history.

        Returns:
            (response, updated_history) where updated_history includes this turn.
        """
        inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
        # also add end-of-assistant token in eos token id to avoid unnecessary generation
        eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
        outputs = self.generate(
            **inputs,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Drop the prompt tokens; keep only the newly generated suffix.
        outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
        response = tokenizer.decode(outputs, skip_special_tokens=True)
        response = response.split('<|im_end|>')[0]
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(
        self,
        tokenizer,
        query: str,
        history: List[Tuple[str, str]] = [],
        max_new_tokens: int = 1024,
        do_sample: bool = True,
        temperature: float = 0.8,
        top_p: float = 0.8,
        **kwargs,
    ):
        """
        Return a generator in format: (response, history)
        Eg.
        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
        ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
        """
        if BaseStreamer is None:
            raise ModuleNotFoundError(
                'The version of `transformers` is too low. Please make sure '
                'that you have installed `transformers>=4.28.0`.'
            )

        # Producer (generation thread) pushes partial responses; the returned
        # generator consumes them. `None` is the end-of-stream sentinel.
        response_queue = queue.Queue(maxsize=20)

        class ChatStreamer(BaseStreamer):
            # Streamer that decodes tokens incrementally and feeds the queue.
            def __init__(self, tokenizer) -> None:
                super().__init__()
                self.tokenizer = tokenizer
                self.queue = response_queue
                self.query = query
                self.history = history
                self.response = ''
                self.cache = []
                self.received_inputs = False
                self.queue.put((self.response, history + [(self.query, self.response)]))

            def put(self, value):
                if len(value.shape) > 1 and value.shape[0] > 1:
                    raise ValueError('ChatStreamer only supports batch size 1')
                elif len(value.shape) > 1:
                    value = value[0]

                if not self.received_inputs:
                    # The first received value is input_ids, ignore here
                    self.received_inputs = True
                    return

                self.cache.extend(value.tolist())
                token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
                if token.strip() != '<|im_end|>':
                    self.response = self.response + token
                    history = self.history + [(self.query, self.response)]
                    self.queue.put((self.response, history))
                    self.cache = []
                else:
                    self.end()

            def end(self):
                self.queue.put(None)

        def stream_producer():
            return self.chat(
                tokenizer=tokenizer,
                query=query,
                streamer=ChatStreamer(tokenizer=tokenizer),
                history=history,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                temperature=temperature,
                top_p=top_p,
                **kwargs,
            )

        def consumer():
            producer = threading.Thread(target=stream_producer)
            producer.start()
            while True:
                res = response_queue.get()
                if res is None:
                    return
                yield res

        return consumer()
1308
+
1309
# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
@add_start_docstrings(
    """
    The InternLM2 Model transformer with a sequence classification head on top (linear layer).

    [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
    as other causal models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    InternLM2_START_DOCSTRING,
)
class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = InternLM2Model(config)
        # Classification head applied to the last non-padding token's hidden state.
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # Per-token logits; pooled below to one logit vector per sequence.
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # Index of the last non-padding token per row: position of the
                # first pad token minus one (argmax of 0s returns 0 when no pad
                # is present, giving -1, i.e. the final token).
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Infer the problem type once if the config did not specify it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
modeling_phi3.py ADDED
@@ -0,0 +1,1610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """ PyTorch Phi-3 model."""
16
+
17
+ import inspect
18
+ import math
19
+ import warnings
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+ from transformers.activations import ACT2FN
28
+ from transformers.cache_utils import Cache, DynamicCache
29
+ from transformers.modeling_attn_mask_utils import \
30
+ _prepare_4d_causal_attention_mask
31
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput)
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10, logging,
41
+ replace_return_docstrings)
42
+
43
+ from .configuration_phi3 import Phi3Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
# Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores try/catch blocks, but not if statements
# if is_flash_attn_2_available():
_flash_supports_window_size = False
try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import (index_first_axis, pad_input,  # noqa
                                         unpad_input)

    # Sliding-window attention requires a flash-attn build whose kernel
    # accepts a `window_size` argument.
    _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters)
    has_flash_attn = True
except ImportError as error:
    logger.warning(
        f'`flash-attention` package not found, consider installing for better performance: {error}.'
    )
    if not _flash_supports_window_size:
        # Fixed typo in the log message: 'flash-attenton' -> 'flash-attention'.
        logger.warning(
            "Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
        )
    has_flash_attn = False

_CHECKPOINT_FOR_DOC = 'microsoft/Phi-3-mini-4k-instruct'
_CONFIG_FOR_DOC = 'Phi3Config'

PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/Phi-3-mini-4k-instruct',
    'microsoft/Phi-3-mini-128k-instruct',
    # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
]
75
+
76
+
77
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
class Phi3RMSNorm(nn.Module):
    """Root-mean-square layer norm with a learnable gain.

    Phi3RMSNorm is equivalent to T5LayerNorm.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Per-channel gain, initialised to ones.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        orig_dtype = hidden_states.dtype
        # Compute in float32 for numerical stability, then cast back.
        upcast = hidden_states.to(torch.float32)
        inv_rms = torch.rsqrt(upcast.pow(2).mean(-1, keepdim=True) + self.variance_epsilon)
        normed = upcast * inv_rms
        return self.weight * normed.to(orig_dtype)
93
+
94
+
95
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
96
+ def _get_unpad_data(attention_mask):
97
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
98
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
99
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
100
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
101
+ return (
102
+ indices,
103
+ cu_seqlens,
104
+ max_seqlen_in_batch,
105
+ )
106
+
107
+
108
# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
class Phi3RotaryEmbedding(nn.Module):
    """Vanilla rotary position embedding: returns cos/sin tables per position."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        # Created lazily on first forward so it lives on the input's device.
        self.register_buffer('inv_freq', None, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if self.inv_freq is None:
            exponents = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
            self.inv_freq = 1.0 / (self.base ** exponents)

        batch = position_ids.shape[0]
        freq_col = self.inv_freq[None, :, None].float().expand(batch, -1, 1)
        pos_row = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        autocast_device = x.device.type
        if not (isinstance(autocast_device, str) and autocast_device != 'mps'):
            autocast_device = 'cpu'
        with torch.autocast(device_type=autocast_device, enabled=False):
            angles = (freq_col.float() @ pos_row.float()).transpose(1, 2)
            table = torch.cat((angles, angles), dim=-1)
            cos = table.cos()
            sin = table.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
137
+
138
+
139
class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
    """Rotary embedding with "su" (LongRoPE-style) per-frequency rescaling.

    Uses `short_factor` within the original pre-training window and
    `long_factor` beyond it, plus a sqrt-log magnitude correction when the
    configured context exceeds the original one.
    """

    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling['short_factor']
        self.long_factor = config.rope_scaling['long_factor']
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # Effective sequence length decides which factor set applies.
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            chosen = self.long_factor
        else:
            chosen = self.short_factor
        ext_factors = torch.tensor(chosen, dtype=torch.float32, device=x.device)

        exponents = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        # Rescaled inverse frequencies (recomputed each call on purpose:
        # the factor choice depends on the current positions).
        self.inv_freq = 1.0 / (ext_factors * self.base ** exponents)

        freq_col = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        pos_row = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        autocast_device = x.device.type
        if not (isinstance(autocast_device, str) and autocast_device != 'mps'):
            autocast_device = 'cpu'
        with torch.autocast(device_type=autocast_device, enabled=False):
            angles = (freq_col.float() @ pos_row.float()).transpose(1, 2)
            table = torch.cat((angles, angles), dim=-1)

        scale = self.max_position_embeddings / self.original_max_position_embeddings
        if scale <= 1.0:
            scaling_factor = 1.0
        else:
            # Magnitude correction for extended contexts.
            scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

        cos = table.cos() * scaling_factor
        sin = table.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
178
+
179
+
180
class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
    """Rotary embedding with YaRN-style frequency rescaling.

    Identical to the "su" variant except for the magnitude correction, which
    here is linear in the log of the context-extension ratio.
    """

    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling['short_factor']
        self.long_factor = config.rope_scaling['long_factor']
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # Effective sequence length decides which factor set applies.
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            chosen = self.long_factor
        else:
            chosen = self.short_factor
        ext_factors = torch.tensor(chosen, dtype=torch.float32, device=x.device)

        exponents = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        # Rescaled inverse frequencies (recomputed each call on purpose:
        # the factor choice depends on the current positions).
        self.inv_freq = 1.0 / (ext_factors * self.base ** exponents)

        freq_col = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        pos_row = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        autocast_device = x.device.type
        if not (isinstance(autocast_device, str) and autocast_device != 'mps'):
            autocast_device = 'cpu'
        with torch.autocast(device_type=autocast_device, enabled=False):
            angles = (freq_col.float() @ pos_row.float()).transpose(1, 2)
            table = torch.cat((angles, angles), dim=-1)

        scale = self.max_position_embeddings / self.original_max_position_embeddings
        if scale <= 1.0:
            scaling_factor = 1.0
        else:
            # YaRN magnitude correction.
            scaling_factor = 0.1 * math.log(scale) + 1.0

        cos = table.cos() * scaling_factor
        sin = table.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
219
+
220
+
221
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotate the last dimension by half: ``[a, b]`` (split halfway) becomes ``[-b, a]``."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
228
+
229
# Adapted from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The dimension along which `cos` and `sin` are unsqueezed so that they
            broadcast against `q` and `k`. With `cos`/`sin` of shape
            [batch_size, seq_len, head_dim], use 1 for q/k shaped
            [batch_size, heads, seq_len, head_dim] and 2 for
            [batch_size, seq_len, heads, head_dim].
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """

    def _rotate(t):
        # rotate_half: [a, b] -> [-b, a] along the last dimension.
        mid = t.shape[-1] // 2
        return torch.cat((-t[..., mid:], t[..., :mid]), dim=-1)

    cos_b = cos.unsqueeze(unsqueeze_dim)
    sin_b = sin.unsqueeze(unsqueeze_dim)
    return q * cos_b + _rotate(q) * sin_b, k * cos_b + _rotate(k) * sin_b
256
+
257
class Phi3MLP(nn.Module):
    """Gated feed-forward block: fused gate/up projection, elementwise gating
    through the configured activation, then a down projection."""

    def __init__(self, config):
        super().__init__()

        self.config = config
        # Gate and up projections are fused into one matmul and split in forward().
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        gate, up = self.gate_up_proj(hidden_states).chunk(2, dim=-1)
        return self.down_proj(self.activation_fn(gate) * up)
275
+
276
# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        # Nothing to do when each query head already has its own k/v head.
        return hidden_states
    expanded = hidden_states.unsqueeze(2).expand(batch, kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, kv_heads * n_rep, seq_len, head_dim)
288
+
289
class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            # The layer index is required by the Cache API calls in forward().
            logger.warning_once(
                f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will '
                'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` '
                'when creating this class.'
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        # Grouped-query attention: this many query heads share each k/v head.
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.original_max_position_embeddings = config.original_max_position_embeddings
        self.rope_theta = config.rope_theta
        self.rope_scaling = config.rope_scaling
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        # Query, key and value projections are fused into a single linear layer
        # and split apart in forward().
        op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
        self._init_rope()

    def _init_rope(self):
        # Plain RoPE unless the config requests 'su' or 'yarn' long-context scaling.
        if self.rope_scaling is None:
            self.rotary_emb = Phi3RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            if scaling_type == 'su':
                self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
            elif scaling_type == 'yarn':
                self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
            else:
                raise ValueError(f'Unknown RoPE scaling type {scaling_type}')

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Eager (matmul + softmax) attention; returns (attn_output, attn_weights or None, past_key_value)."""
        logger.warning_once('You are not running the flash-attention implementation, expect numerical differences.')

        bsz, q_len, _ = hidden_states.size()

        # Split the fused projection into query / key / value slices.
        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # (bsz, q_len, heads * head_dim) -> (bsz, heads, q_len, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
                    'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
                    'with a layer index.'
                )
            # Account for previously cached positions when computing RoPE tables.
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # Scaled dot-product scores: QK^T / sqrt(head_dim).
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            # Additive mask: disallowed positions carry large negative values.
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        # (bsz, heads, q_len, head_dim) -> (bsz, q_len, hidden_size)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
423
+
424
class Phi3FlashAttention2(Phi3Attention):
    """
    Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Flash-attention forward pass; returns (attn_output, None, past_key_value)."""
        # Phi3FlashAttention2 attention does not support output_attentions

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library."
            )
            raise ValueError('The current flash attention version does not support sliding window attention.')

        output_attentions = False

        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        bsz, q_len, _ = hidden_states.size()

        # Split the fused QKV projection into per-role slices.
        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
                    'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
                    'with a layer index.'
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        # Sliding-window kernels are only used once the k/v length exceeds the window.
        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, 'sliding_window', None) is not None
            and kv_seq_len > self.config.sliding_window
        )

        if past_key_value is not None:
            # Activate slicing cache only if the config has a value `sliding_windows` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, 'sliding_window', None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                # Keep only the most recent `sliding_window - 1` cached positions.
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got'
                        f' {past_key.shape}'
                    )

                if attention_mask is not None:
                    # Trim the padding mask to the sliced window and append the new position.
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_dropout = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32.

        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.qkv_proj.weight.dtype

            logger.warning_once(
                f'The input hidden states seems to be silently casted in float32, this might be related to'
                f' the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in'
                f' {target_dtype}.'
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected (bsz, seq_len, heads, head_dim) layout for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=attn_dropout,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            # Strip padded positions and build varlen metadata, then run the
            # variable-length kernel and re-pad the output.
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

        return attn_output

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        """Remove padded positions and build the cu_seqlens metadata expected by flash_attn_varlen_func."""
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it on the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            # Prefill: queries share the key unpadding indices.
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token decode: one query per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
732
+
733
# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
# TODO @Arthur no longer copied from LLama after static cache
class Phi3SdpaAttention(Phi3Attention):
    """
    Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Phi3Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from Phi3Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """SDPA forward pass; returns (attn_output, None, past_key_value)."""
        if output_attentions:
            # SDPA cannot return the attention weights, so fall back to the eager path.
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                'Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, '
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        # Split the fused QKV projection into per-role slices.
        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # (bsz, q_len, heads * head_dim) -> (bsz, heads, q_len, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Expand k/v heads to match the number of query heads (grouped-query attention).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == 'cuda' and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        # (bsz, heads, q_len, head_dim) -> (bsz, q_len, hidden_size)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value
823
+
824
# Dispatch table mapping `config._attn_implementation` to the attention class
# instantiated by Phi3DecoderLayer.
PHI3_ATTENTION_CLASSES = {
    'eager': Phi3Attention,
    'flash_attention_2': Phi3FlashAttention2,
    'sdpa': Phi3SdpaAttention,
}
830
+
831
class Phi3DecoderLayer(nn.Module):
    """A single Phi-3 transformer layer: pre-norm self-attention and a gated MLP,
    each wrapped in a residual connection with residual dropout."""

    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__()

        self.config = config
        # Attention backend is chosen by the config ('eager', 'flash_attention_2' or 'sdpa').
        self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)

        self.mlp = Phi3MLP(config)
        self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
        self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`'
            )
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        attn_outputs, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )

        # Residual connection around attention (with residual dropout).
        hidden_states = residual + self.resid_attn_dropout(attn_outputs)

        # Pre-norm MLP block with its own residual connection.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
908
+
909
# Shared class-level docstring, injected into the Phi-3 model classes below via
# the `@add_start_docstrings(...)` decorator.
PHI3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Phi3Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
924
+
925
+
926
@add_start_docstrings(
    'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
    PHI3_START_DOCSTRING,
)
class Phi3PreTrainedModel(PreTrainedModel):
    """Abstract base class for Phi-3 models.

    Handles weight initialization and provides the class attributes the
    `transformers` loading machinery reads (config class, FA2/cache support
    flags, modules that must not be split across devices, etc.).
    """

    config_class = Phi3Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['Phi3DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_cache_class = True

    _version = '0.0.5'

    def __init__(self, config: Phi3Config):
        # Fall back to eager attention when flash-attn is not installed so the
        # model stays usable without the optional dependency. Use the module
        # logger (consistent with the rest of this file) instead of print().
        if not has_flash_attn:
            config._attn_implementation = 'eager'
            logger.warning('Flash attention is not available, using eager attention instead.')
        super().__init__(config)

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from N(0, initializer_range)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
958
+
959
+
960
# Shared forward() argument documentation, injected into the model classes
# below via the `@add_start_docstrings_to_model_forward(...)` decorator.
PHI3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
1028
+
1029
+
1030
@add_start_docstrings(
    'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
    PHI3_START_DOCSTRING,
)
class Phi3Model(Phi3PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]

    Args:
        config: Phi3Config
    """

    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        # NOTE(review): embed_dropout is constructed here but is not applied in
        # forward() below — inputs_embeds goes straight into the layer stack.
        self.embed_dropout = nn.Dropout(config.embd_pdrop)
        self.layers = nn.ModuleList(
            [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        # Cached so forward() can branch on flash-attention vs. 4d-mask paths.
        self._attn_implementation = config._attn_implementation

        self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the token-embedding module."""
        return self.embed_tokens

    def set_input_embeddings(self, value):
        """Replace the token-embedding module (used for resizing/tying)."""
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Resolve per-call flags, falling back to the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        past_key_values_length = 0

        # Caching and gradient checkpointing are mutually exclusive: checkpointed
        # recomputation would invalidate the cached key/value states.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

        if use_cache:
            # Accept both the legacy tuple-of-tuples cache and the new Cache
            # class; convert legacy input to DynamicCache and remember the
            # format so the output can be converted back at the end.
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            # Positions continue from the cached prefix length.
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # Flash-attention generation requires left padding: with right padding
        # the last row position would be a pad token and decoding misbehaves.
        if attention_mask is not None and self._attn_implementation == 'flash_attention_2' and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right'"
                    ' this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to '
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                )

        if self._attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            # (dropped entirely when there is no padding, since FA2 handles the
            # causal structure itself).
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                # The cache sits at index 2 when attentions are also returned.
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            # Return the cache in the same format the caller supplied.
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
1202
+
1203
+
1204
class Phi3ForCausalLM(Phi3PreTrainedModel):
    """Phi-3 decoder with a language-modeling head (`lm_head`) on top."""

    # lm_head may be tied to the input embeddings.
    _tied_weights_keys = ['lm_head.weight']

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
    def get_input_embeddings(self):
        return self.model.embed_tokens

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
    def get_output_embeddings(self):
        return self.lm_head

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
    def set_decoder(self, decoder):
        self.model = decoder

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
    def get_decoder(self):
        return self.model

    # Ignore copy
    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")

        >>> prompt = "This is an example script ."
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        # Upcast to fp32 for a numerically stable cross-entropy.
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Trim `input_ids`/`attention_mask`/`position_ids` to the tokens the
        cache has not seen yet, for use by `generate()`."""
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                # NOTE(review): `seen_tokens` and `get_max_length()` are
                # deprecated on newer transformers Cache classes — verify
                # against the pinned transformers version.
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get('position_ids', None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if (inputs_embeds is not None and past_key_values is None) or (inputs_embeds is not None and len(past_key_values) == 0):
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}

        model_inputs.update(
            {
                'position_ids': position_ids,
                'past_key_values': past_key_values,
                'use_cache': kwargs.get('use_cache'),
                'attention_mask': attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder the legacy-format cache along the batch dim for beam search."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
1397
+
1398
+
1399
@add_start_docstrings(
    """
    The [`Phi3Model`] with a sequence classification head on top (linear layer).

    [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    PHI3_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        # Pools the last non-pad token's hidden state into num_labels scores.
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = model_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            # No pad token: fall back to the last position of each row.
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        # Select each row's last (non-pad) token logits.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + model_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=model_outputs.past_key_values,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
        )
1521
+
1522
+
1523
@add_start_docstrings(
    """
    [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    PHI3_START_DOCSTRING,
)
# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
class Phi3ForTokenClassification(Phi3PreTrainedModel):
    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.model = Phi3Model(config)
        # Classifier dropout priority: classifier_dropout > hidden_dropout > 0.1 default.
        if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_outputs = self.model(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = model_outputs[0]
        hidden_states = self.dropout(hidden_states)
        # Per-token classification scores of shape (batch, seq_len, num_labels).
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            batch_size, seq_length = labels.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
            )

        if not return_dict:
            # Skip the past_key_values slot (index 1) in the tuple output.
            output = (logits,) + model_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=model_outputs.hidden_states,
            attentions=model_outputs.attentions,
        )