Rudrresh committed
Commit fc8ea2e · verified · 1 Parent(s): 17a651c

Upload modelling_phi3.py

Files changed (1):
  1. modelling_phi3.py +1137 -0
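
A minimal usage sketch for repositories that ship custom modeling code like this file: assuming the repo's `config.json` maps its auto classes to `modelling_phi3.py`, the model can be loaded with `trust_remote_code=True`. The repository id below is a placeholder, not a real checkpoint.

```python
# Hedged usage sketch: load a Hub repo that bundles custom Phi-3 modeling code.
# "your-namespace/your-phi3-repo" is a hypothetical repository id; trust_remote_code=True
# tells transformers to import the repo's modelling_phi3.py instead of its built-in Phi3 code.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/your-phi3-repo"  # placeholder repository id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Hello, Phi-3!", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```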
modelling_phi3.py ADDED
@@ -0,0 +1,1137 @@
1
+ from typing import Callable, List, Optional, Tuple, Union
2
+
3
+ import torch
4
+ from torch import nn
5
+
6
+ from transformers.activations import ACT2FN
7
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
8
+ from transformers.generation import GenerationMixin
9
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
10
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
11
+ from transformers.modeling_outputs import (
12
+ BaseModelOutputWithPast,
13
+ CausalLMOutputWithPast,
14
+ SequenceClassifierOutputWithPast,
15
+ TokenClassifierOutput,
16
+ )
17
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
18
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
19
+ from transformers.processing_utils import Unpack
20
+ from transformers.utils import (
21
+ LossKwargs,
22
+ add_code_sample_docstrings,
23
+ add_start_docstrings,
24
+ add_start_docstrings_to_model_forward,
25
+ logging,
26
+ replace_return_docstrings,
27
+ )
28
+ from transformers.utils.deprecation import deprecate_kwarg
29
+ from .configuration_phi3 import Phi3Config
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
35
+ _CONFIG_FOR_DOC = "Phi3Config"
36
+
37
+
38
+ class Phi3MLP(nn.Module):
39
+ def __init__(self, config):
40
+ super().__init__()
41
+
42
+ self.config = config
43
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
44
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
45
+ self.activation_fn = ACT2FN[config.hidden_act]
46
+
47
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
48
+ up_states = self.gate_up_proj(hidden_states)
49
+
50
+ gate, up_states = up_states.chunk(2, dim=-1)
51
+ up_states = up_states * self.activation_fn(gate)
52
+
53
+ return self.down_proj(up_states)
54
+
55
+
56
+ def rotate_half(x):
57
+ """Rotates half the hidden dims of the input."""
58
+ x1 = x[..., : x.shape[-1] // 2]
59
+ x2 = x[..., x.shape[-1] // 2 :]
60
+ return torch.cat((-x2, x1), dim=-1)
61
+
62
+
63
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
64
+ """
65
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
66
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
67
+ """
68
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
69
+ if n_rep == 1:
70
+ return hidden_states
71
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
72
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
73
+
74
+
75
+ def eager_attention_forward(
76
+ module: nn.Module,
77
+ query: torch.Tensor,
78
+ key: torch.Tensor,
79
+ value: torch.Tensor,
80
+ attention_mask: Optional[torch.Tensor],
81
+ scaling: float,
82
+ dropout: float = 0.0,
83
+ **kwargs,
84
+ ):
85
+ key_states = repeat_kv(key, module.num_key_value_groups)
86
+ value_states = repeat_kv(value, module.num_key_value_groups)
87
+
88
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
89
+ if attention_mask is not None:
90
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
91
+ attn_weights = attn_weights + causal_mask
92
+
93
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
94
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
95
+ attn_output = torch.matmul(attn_weights, value_states)
96
+ attn_output = attn_output.transpose(1, 2).contiguous()
97
+
98
+ return attn_output, attn_weights
99
+
100
+
101
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
102
+ """Applies Rotary Position Embedding to the query and key tensors.
103
+ Args:
104
+ q (`torch.Tensor`): The query tensor.
105
+ k (`torch.Tensor`): The key tensor.
106
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
107
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
108
+ position_ids (`torch.Tensor`, *optional*):
109
+ Deprecated and unused.
110
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
111
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
112
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
113
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
114
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
115
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
116
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
117
+ Returns:
118
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
119
+ """
120
+ cos = cos.unsqueeze(unsqueeze_dim)
121
+ sin = sin.unsqueeze(unsqueeze_dim)
122
+
123
+ rotary_dim = cos.shape[-1]
124
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
125
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
126
+
127
+ q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
128
+ k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
129
+ return q_embed, k_embed
130
+
131
+
132
+ class Phi3Attention(nn.Module):
133
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
134
+
135
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
136
+ super().__init__()
137
+ self.config = config
138
+ self.layer_idx = layer_idx
139
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
140
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
141
+ self.num_key_value_heads = config.num_key_value_heads
142
+ self.scaling = self.head_dim**-0.5
143
+ self.attention_dropout = config.attention_dropout
144
+ self.is_causal = True
145
+
146
+ op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
147
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
148
+ self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
149
+
150
+ def forward(
151
+ self,
152
+ hidden_states: torch.Tensor,
153
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
154
+ attention_mask: Optional[torch.Tensor],
155
+ past_key_value: Optional[Cache] = None,
156
+ cache_position: Optional[torch.LongTensor] = None,
157
+ **kwargs: Unpack[FlashAttentionKwargs],
158
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
159
+ input_shape = hidden_states.shape[:-1]
160
+ hidden_shape = (*input_shape, -1, self.head_dim)
161
+
162
+ qkv = self.qkv_proj(hidden_states)
163
+ query_pos = self.config.num_attention_heads * self.head_dim
164
+ query_states = qkv[..., :query_pos]
165
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
166
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
167
+
168
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
169
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
170
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
171
+
172
+ cos, sin = position_embeddings
173
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
174
+
175
+ if past_key_value is not None:
176
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
177
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
178
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
179
+
180
+ attention_interface: Callable = eager_attention_forward
181
+ if self.config._attn_implementation != "eager":
182
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
183
+ logger.warning_once(
184
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
185
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
186
+ )
187
+ else:
188
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
189
+
190
+ attn_output, attn_weights = attention_interface(
191
+ self,
192
+ query_states,
193
+ key_states,
194
+ value_states,
195
+ attention_mask,
196
+ dropout=0.0 if not self.training else self.attention_dropout,
197
+ scaling=self.scaling,
198
+ sliding_window=getattr(self.config, "sliding_window", None),
199
+ **kwargs,
200
+ )
201
+
202
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
203
+ attn_output = self.o_proj(attn_output)
204
+ return attn_output, attn_weights
205
+
206
+
207
+ class Phi3RMSNorm(nn.Module):
208
+ def __init__(self, hidden_size, eps=1e-6):
209
+ """
210
+ Phi3RMSNorm is equivalent to T5LayerNorm
211
+ """
212
+ super().__init__()
213
+ self.weight = nn.Parameter(torch.ones(hidden_size))
214
+ self.variance_epsilon = eps
215
+
216
+ def forward(self, hidden_states):
217
+ input_dtype = hidden_states.dtype
218
+ hidden_states = hidden_states.to(torch.float32)
219
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
220
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
221
+ return self.weight * hidden_states.to(input_dtype)
222
+
223
+ def extra_repr(self):
224
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
225
+
226
+
227
+ class Phi3DecoderLayer(nn.Module):
228
+ def __init__(self, config: Phi3Config, layer_idx: int):
229
+ super().__init__()
230
+ self.hidden_size = config.hidden_size
231
+ self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
232
+ self.mlp = Phi3MLP(config)
233
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
234
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
235
+ self.config = config
236
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
237
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
238
+
239
+ def forward(
240
+ self,
241
+ hidden_states: torch.Tensor,
242
+ attention_mask: Optional[torch.Tensor] = None,
243
+ position_ids: Optional[torch.LongTensor] = None,
244
+ past_key_value: Optional[Cache] = None,
245
+ output_attentions: Optional[bool] = False,
246
+ use_cache: Optional[bool] = False,
247
+ cache_position: Optional[torch.LongTensor] = None,
248
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
249
+ **kwargs: Unpack[FlashAttentionKwargs],
250
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
251
+ """
252
+ Args:
253
+ hidden_states (`torch.FloatTensor`):
254
+ input to the layer of shape `(batch, seq_len, embed_dim)`
255
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
256
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
257
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
258
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
259
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
260
+ past_key_value (`Cache`, *optional*): cached past key and value projection states
261
+ output_attentions (`bool`, *optional*):
262
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
263
+ returned tensors for more detail.
264
+ use_cache (`bool`, *optional*):
265
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
266
+ (see `past_key_values`).
267
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
268
+ Indices depicting the position of the input sequence tokens in the sequence
269
+ kwargs (`dict`, *optional*):
270
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
271
+ into the model
272
+ """
273
+ residual = hidden_states
274
+
275
+ hidden_states = self.input_layernorm(hidden_states)
276
+
277
+ # Self Attention
278
+ hidden_states, self_attn_weights = self.self_attn(
279
+ hidden_states=hidden_states,
280
+ attention_mask=attention_mask,
281
+ position_ids=position_ids,
282
+ past_key_value=past_key_value,
283
+ output_attentions=output_attentions,
284
+ use_cache=use_cache,
285
+ cache_position=cache_position,
286
+ position_embeddings=position_embeddings,
287
+ **kwargs,
288
+ )
289
+ hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
290
+
291
+ residual = hidden_states
292
+ hidden_states = self.post_attention_layernorm(hidden_states)
293
+ hidden_states = self.mlp(hidden_states)
294
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
295
+
296
+ outputs = (hidden_states,)
297
+ if output_attentions:
298
+ outputs += (self_attn_weights,)
299
+
300
+ return outputs
301
+
302
+
303
+ class Phi3RotaryEmbedding(nn.Module):
304
+ def __init__(self, config: Phi3Config, device=None):
305
+ super().__init__()
306
+ # BC: "rope_type" was originally "type"
307
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
308
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
309
+ else:
310
+ self.rope_type = "default"
311
+ self.max_seq_len_cached = config.max_position_embeddings
312
+ self.original_max_seq_len = config.max_position_embeddings
313
+
314
+ self.config = config
315
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
316
+
317
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
318
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
319
+ self.original_inv_freq = self.inv_freq
320
+
321
+ def _dynamic_frequency_update(self, position_ids, device):
322
+ """
323
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
324
+ 1 - growing beyond the cached sequence length (allow scaling)
325
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
326
+ """
327
+ seq_len = torch.max(position_ids) + 1
328
+ if seq_len > self.max_seq_len_cached: # growth
329
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
330
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
331
+ self.max_seq_len_cached = seq_len
332
+
333
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
334
+ # This .to() is needed if the model has been moved to a device after being initialized (because
335
+ # the buffer is automatically moved, but not the original copy)
336
+ self.original_inv_freq = self.original_inv_freq.to(device)
337
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
338
+ self.max_seq_len_cached = self.original_max_seq_len
339
+
340
+ @torch.no_grad()
341
+ def forward(self, x, position_ids):
342
+ if "dynamic" in self.rope_type:
343
+ self._dynamic_frequency_update(position_ids, device=x.device)
344
+ elif self.rope_type == "longrope":
345
+ self._longrope_frequency_update(position_ids, device=x.device)
346
+
347
+ # Core RoPE block
348
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
349
+ position_ids_expanded = position_ids[:, None, :].float()
350
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
351
+ device_type = x.device.type
352
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
353
+ with torch.autocast(device_type=device_type, enabled=False):
354
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
355
+ emb = torch.cat((freqs, freqs), dim=-1)
356
+ cos = emb.cos()
357
+ sin = emb.sin()
358
+
359
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
360
+ cos = cos * self.attention_scaling
361
+ sin = sin * self.attention_scaling
362
+
363
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
364
+
365
+ def _longrope_frequency_update(self, position_ids, device):
366
+ """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
367
+ seq_len = torch.max(position_ids) + 1
368
+ if hasattr(self.config, "original_max_position_embeddings"):
369
+ original_max_position_embeddings = self.config.original_max_position_embeddings
370
+ else:
371
+ original_max_position_embeddings = self.config.max_position_embeddings
372
+ if seq_len > original_max_position_embeddings:
373
+ if not hasattr(self, "long_inv_freq"):
374
+ self.long_inv_freq, _ = self.rope_init_fn(
375
+ self.config, device, seq_len=original_max_position_embeddings + 1
376
+ )
377
+ self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
378
+ else:
379
+ # This .to() is needed if the model has been moved to a device after being initialized (because
380
+ # the buffer is automatically moved, but not the original copy)
381
+ self.original_inv_freq = self.original_inv_freq.to(device)
382
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
383
+
384
+
385
+ PHI3_START_DOCSTRING = r"""
386
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
387
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
388
+ etc.)
389
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
390
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
391
+ and behavior.
392
+ Parameters:
393
+ config ([`Phi3Config`]):
394
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
395
+ load the weights associated with the model, only the configuration. Check out the
396
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
397
+ """
398
+
399
+
400
+ @add_start_docstrings(
401
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
402
+ PHI3_START_DOCSTRING,
403
+ )
404
+ class Phi3PreTrainedModel(PreTrainedModel):
405
+ config_class = Phi3Config
406
+ base_model_prefix = "model"
407
+ supports_gradient_checkpointing = True
408
+ _no_split_modules = ["Phi3DecoderLayer"]
409
+ _skip_keys_device_placement = ["past_key_values"]
410
+ _supports_flash_attn_2 = True
411
+ _supports_sdpa = True
412
+ _supports_flex_attn = True
413
+ _supports_cache_class = True
414
+ _supports_quantized_cache = True
415
+ _supports_static_cache = True
416
+ _supports_attention_backend = True
417
+ _version = "0.0.5"
418
+
419
+ def _init_weights(self, module):
420
+ std = self.config.initializer_range
421
+ if isinstance(module, nn.Linear):
422
+ module.weight.data.normal_(mean=0.0, std=std)
423
+ if module.bias is not None:
424
+ module.bias.data.zero_()
425
+ elif isinstance(module, nn.Embedding):
426
+ module.weight.data.normal_(mean=0.0, std=std)
427
+ if module.padding_idx is not None:
428
+ module.weight.data[module.padding_idx].zero_()
429
+
430
+
431
+ PHI3_INPUTS_DOCSTRING = r"""
432
+ Args:
433
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
434
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
435
+ it.
436
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
437
+ [`PreTrainedTokenizer.__call__`] for details.
438
+ [What are input IDs?](../glossary#input-ids)
439
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
440
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
441
+ - 1 for tokens that are **not masked**,
442
+ - 0 for tokens that are **masked**.
443
+ [What are attention masks?](../glossary#attention-mask)
444
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
445
+ [`PreTrainedTokenizer.__call__`] for details.
446
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
447
+ `past_key_values`).
448
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
449
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
450
+ information on the default strategy.
451
+ - 1 indicates the head is **not masked**,
452
+ - 0 indicates the head is **masked**.
453
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
454
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
455
+ config.n_positions - 1]`.
456
+ [What are position IDs?](../glossary#position-ids)
457
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
458
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
459
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
460
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
461
+ Two formats are allowed:
462
+ - a [`~cache_utils.Cache`] instance, see our
463
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
464
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
465
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
466
+ cache format.
467
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
468
+ legacy cache format will be returned.
469
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
470
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
471
+ of shape `(batch_size, sequence_length)`.
472
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
473
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
474
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
475
+ model's internal embedding lookup matrix.
476
+ use_cache (`bool`, *optional*):
477
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
478
+ `past_key_values`).
479
+ output_attentions (`bool`, *optional*):
480
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
481
+ tensors for more detail.
482
+ output_hidden_states (`bool`, *optional*):
483
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
484
+ more detail.
485
+ return_dict (`bool`, *optional*):
486
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
487
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
488
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
489
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
490
+ the complete sequence length.
491
+ """
492
+
493
+
494
+ @add_start_docstrings(
495
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
496
+ PHI3_START_DOCSTRING,
497
+ )
498
+ class Phi3Model(Phi3PreTrainedModel):
499
+ """
500
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
501
+ Args:
502
+ config: Phi3Config
503
+ """
504
+
505
+ def __init__(self, config: Phi3Config):
506
+ super().__init__(config)
507
+ self.padding_idx = config.pad_token_id
508
+ self.vocab_size = config.vocab_size
509
+
510
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
511
+ self.layers = nn.ModuleList(
512
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
513
+ )
514
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
515
+ self.rotary_emb = Phi3RotaryEmbedding(config=config)
516
+ self.gradient_checkpointing = False
517
+
518
+ # Initialize weights and apply final processing
519
+ self.post_init()
520
+
521
+ def get_input_embeddings(self):
522
+ return self.embed_tokens
523
+
524
+ def set_input_embeddings(self, value):
525
+ self.embed_tokens = value
526
+
527
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
528
+ def forward(
529
+ self,
530
+ input_ids: torch.LongTensor = None,
531
+ attention_mask: Optional[torch.Tensor] = None,
532
+ position_ids: Optional[torch.LongTensor] = None,
533
+ past_key_values: Optional[Cache] = None,
534
+ inputs_embeds: Optional[torch.FloatTensor] = None,
535
+ use_cache: Optional[bool] = None,
536
+ output_attentions: Optional[bool] = None,
537
+ output_hidden_states: Optional[bool] = None,
538
+ return_dict: Optional[bool] = None,
539
+ cache_position: Optional[torch.LongTensor] = None,
540
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
541
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
542
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
543
+ output_hidden_states = (
544
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
545
+ )
546
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
547
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
548
+
549
+ if (input_ids is None) ^ (inputs_embeds is not None):
550
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
551
+
552
+ if self.gradient_checkpointing and self.training and use_cache:
553
+ logger.warning_once(
554
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
555
+ )
556
+ use_cache = False
557
+
558
+ if inputs_embeds is None:
559
+ inputs_embeds = self.embed_tokens(input_ids)
560
+
561
+ if use_cache and past_key_values is None:
562
+ past_key_values = DynamicCache()
563
+
564
+ if cache_position is None:
565
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
566
+ cache_position = torch.arange(
567
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
568
+ )
569
+
570
+ if position_ids is None:
571
+ position_ids = cache_position.unsqueeze(0)
572
+
573
+ causal_mask = self._update_causal_mask(
574
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
575
+ )
576
+
577
+ hidden_states = inputs_embeds
578
+
579
+ # create position embeddings to be shared across the decoder layers
580
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
581
+
582
+ # decoder layers
583
+ all_hidden_states = () if output_hidden_states else None
584
+ all_self_attns = () if output_attentions else None
585
+
586
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
587
+ if output_hidden_states:
588
+ all_hidden_states += (hidden_states,)
589
+
590
+ if self.gradient_checkpointing and self.training:
591
+ layer_outputs = self._gradient_checkpointing_func(
592
+ decoder_layer.__call__,
593
+ hidden_states,
594
+ causal_mask,
595
+ position_ids,
596
+ past_key_values,
597
+ output_attentions,
598
+ use_cache,
599
+ cache_position,
600
+ position_embeddings,
601
+ )
602
+ else:
603
+ layer_outputs = decoder_layer(
604
+ hidden_states,
605
+ attention_mask=causal_mask,
606
+ position_ids=position_ids,
607
+ past_key_value=past_key_values,
608
+ output_attentions=output_attentions,
609
+ use_cache=use_cache,
610
+ cache_position=cache_position,
611
+ position_embeddings=position_embeddings,
612
+ **flash_attn_kwargs,
613
+ )
614
+
615
+ hidden_states = layer_outputs[0]
616
+
617
+ if output_attentions:
618
+ all_self_attns += (layer_outputs[1],)
619
+
620
+ hidden_states = self.norm(hidden_states)
621
+
622
+ # add hidden states from the last decoder layer
623
+ if output_hidden_states:
624
+ all_hidden_states += (hidden_states,)
625
+
626
+ output = BaseModelOutputWithPast(
627
+ last_hidden_state=hidden_states,
628
+ past_key_values=past_key_values if use_cache else None,
629
+ hidden_states=all_hidden_states,
630
+ attentions=all_self_attns,
631
+ )
632
+ return output if return_dict else output.to_tuple()
633
+
634
+ def _update_causal_mask(
635
+ self,
636
+ attention_mask: torch.Tensor,
637
+ input_tensor: torch.Tensor,
638
+ cache_position: torch.Tensor,
639
+ past_key_values: Cache,
640
+ output_attentions: bool,
641
+ ):
642
+ if self.config._attn_implementation == "flash_attention_2":
643
+ if attention_mask is not None and past_key_values is not None:
644
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
645
+ if is_padding_right:
646
+ raise ValueError(
647
+ "You are attempting to perform batched generation with padding_side='right'"
648
+ " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
649
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
650
+ )
651
+ if attention_mask is not None and 0.0 in attention_mask:
652
+ return attention_mask
653
+ return None
654
+
655
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
656
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
657
+ # to infer the attention mask.
658
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
659
+ using_static_cache = isinstance(past_key_values, StaticCache)
660
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
661
+
662
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
663
+ if (
664
+ self.config._attn_implementation == "sdpa"
665
+ and not (using_static_cache or using_sliding_window_cache)
666
+ and not output_attentions
667
+ ):
668
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
669
+ attention_mask,
670
+ inputs_embeds=input_tensor,
671
+ past_key_values_length=past_seen_tokens,
672
+ sliding_window=self.config.sliding_window,
673
+ is_training=self.training,
674
+ ):
675
+ return None
676
+
677
+ dtype, device = input_tensor.dtype, input_tensor.device
678
+ min_dtype = torch.finfo(dtype).min
679
+ sequence_length = input_tensor.shape[1]
680
+ # SlidingWindowCache or StaticCache
681
+ if using_sliding_window_cache or using_static_cache:
682
+ target_length = past_key_values.get_max_cache_shape()
683
+ # DynamicCache or no cache
684
+ else:
685
+ target_length = (
686
+ attention_mask.shape[-1]
687
+ if isinstance(attention_mask, torch.Tensor)
688
+ else past_seen_tokens + sequence_length + 1
689
+ )
690
+
691
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
692
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
693
+ attention_mask,
694
+ sequence_length=sequence_length,
695
+ target_length=target_length,
696
+ dtype=dtype,
697
+ device=device,
698
+ cache_position=cache_position,
699
+ batch_size=input_tensor.shape[0],
700
+ config=self.config,
701
+ past_key_values=past_key_values,
702
+ )
703
+
704
+ if (
705
+ self.config._attn_implementation == "sdpa"
706
+ and attention_mask is not None
707
+ and attention_mask.device.type in ["cuda", "xpu"]
708
+ and not output_attentions
709
+ ):
710
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
711
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
712
+ # Details: https://github.com/pytorch/pytorch/issues/110213
713
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
714
+
715
+ return causal_mask
716
+
717
+ @staticmethod
718
+ def _prepare_4d_causal_attention_mask_with_cache_position(
719
+ attention_mask: torch.Tensor,
720
+ sequence_length: int,
721
+ target_length: int,
722
+ dtype: torch.dtype,
723
+ device: torch.device,
724
+ cache_position: torch.Tensor,
725
+ batch_size: int,
726
+ config: Phi3Config,
727
+ past_key_values: Cache,
728
+ ):
729
+ """
730
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
731
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
732
+ Args:
733
+ attention_mask (`torch.Tensor`):
734
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
735
+ sequence_length (`int`):
736
+ The sequence length being processed.
737
+ target_length (`int`):
738
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
739
+ dtype (`torch.dtype`):
740
+ The dtype to use for the 4D attention mask.
741
+ device (`torch.device`):
742
+ The device to place the 4D attention mask on.
743
+ cache_position (`torch.Tensor`):
744
+ Indices depicting the position of the input sequence tokens in the sequence.
745
+ batch_size (`torch.Tensor`):
746
+ Batch size.
747
+ config (`Phi3Config`):
748
+ The model's configuration class
749
+ past_key_values (`Cache`):
750
+ The cache class that is being used currently to generate
751
+ """
752
+ if attention_mask is not None and attention_mask.dim() == 4:
753
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
754
+ causal_mask = attention_mask
755
+ else:
756
+ min_dtype = torch.finfo(dtype).min
757
+ causal_mask = torch.full(
758
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
759
+ )
760
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
761
+ if config.sliding_window is not None:
762
+ # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
763
+ # the check is needed to verify whether the current checkpoint was trained with sliding window or not
764
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
765
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
766
+ cache_position.reshape(-1, 1) - config.sliding_window
767
+ )
768
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
769
+ causal_mask *= diagonal_attend_mask
770
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
771
+ if attention_mask is not None:
772
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
773
+ if attention_mask.shape[-1] > target_length:
774
+ attention_mask = attention_mask[:, :target_length]
775
+ mask_length = attention_mask.shape[-1]
776
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
777
+ causal_mask.device
778
+ )
779
+ padding_mask = padding_mask == 0
780
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
781
+ padding_mask, min_dtype
782
+ )
783
+ return causal_mask
784
+
785
+
786
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
787
+
788
+
789
+ class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
790
+ _tied_weights_keys = ["lm_head.weight"]
791
+ _tp_plan = {"lm_head": "colwise_rep"}
792
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
793
+
794
+ def __init__(self, config):
795
+ super().__init__(config)
796
+ self.model = Phi3Model(config)
797
+ self.vocab_size = config.vocab_size
798
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
799
+
800
+ # Initialize weights and apply final processing
801
+ self.post_init()
802
+
803
+ def get_input_embeddings(self):
804
+ return self.model.embed_tokens
805
+
806
+ def set_input_embeddings(self, value):
807
+ self.model.embed_tokens = value
808
+
809
+ def get_output_embeddings(self):
810
+ return self.lm_head
811
+
812
+ def set_output_embeddings(self, new_embeddings):
813
+ self.lm_head = new_embeddings
814
+
815
+ def set_decoder(self, decoder):
816
+ self.model = decoder
817
+
818
+ def get_decoder(self):
819
+ return self.model
820
+
821
+ @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
822
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
823
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
824
+ def forward(
825
+ self,
826
+ input_ids: torch.LongTensor = None,
827
+ attention_mask: Optional[torch.Tensor] = None,
828
+ position_ids: Optional[torch.LongTensor] = None,
829
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
830
+ inputs_embeds: Optional[torch.FloatTensor] = None,
831
+ labels: Optional[torch.LongTensor] = None,
832
+ use_cache: Optional[bool] = None,
833
+ output_attentions: Optional[bool] = None,
834
+ output_hidden_states: Optional[bool] = None,
835
+ return_dict: Optional[bool] = None,
836
+ cache_position: Optional[torch.LongTensor] = None,
837
+ logits_to_keep: Union[int, torch.Tensor] = 0,
838
+ **kwargs: Unpack[KwargsForCausalLM],
839
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
840
+ r"""
841
+ Args:
842
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
843
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
844
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
845
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
846
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
847
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
848
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
849
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
850
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
851
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
852
+ Returns:
853
+ Example:
854
+ ```python
855
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
856
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
857
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
858
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
859
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
860
+ >>> # Generate
861
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
862
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
863
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
864
+ ```"""
865
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
866
+ output_hidden_states = (
867
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
868
+ )
869
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
870
+
871
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
872
+ outputs = self.model(
873
+ input_ids=input_ids,
874
+ attention_mask=attention_mask,
875
+ position_ids=position_ids,
876
+ past_key_values=past_key_values,
877
+ inputs_embeds=inputs_embeds,
878
+ use_cache=use_cache,
879
+ output_attentions=output_attentions,
880
+ output_hidden_states=output_hidden_states,
881
+ return_dict=return_dict,
882
+ cache_position=cache_position,
883
+ **kwargs,
884
+ )
885
+
886
+ hidden_states = outputs[0]
887
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
888
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
889
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
890
+
891
+ loss = None
892
+ if labels is not None:
893
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
894
+
895
+ if not return_dict:
896
+ output = (logits,) + outputs[1:]
897
+ return (loss,) + output if loss is not None else output
898
+
899
+ return CausalLMOutputWithPast(
900
+ loss=loss,
901
+ logits=logits,
902
+ past_key_values=outputs.past_key_values,
903
+ hidden_states=outputs.hidden_states,
904
+ attentions=outputs.attentions,
905
+ )
906
+
907
+ def prepare_inputs_for_generation(
908
+ self,
909
+ input_ids,
910
+ past_key_values=None,
911
+ attention_mask=None,
912
+ inputs_embeds=None,
913
+ cache_position=None,
914
+ position_ids=None,
915
+ use_cache=True,
916
+ logits_to_keep=None,
917
+ **kwargs,
918
+ ):
919
+ # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
920
+ # process
921
+
922
+ # The first time the input length crosses the long/short factor switching point, force the cache to be recomputed.
923
+ # This makes that single token position slower, but it is better than the failure that would otherwise occur.
924
+ if (
925
+ past_key_values
926
+ and self.config.rope_scaling
927
+ and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
928
+ ):
929
+ past_length = cache_position[0]
930
+ if past_length <= self.config.original_max_position_embeddings:
931
+ past_key_values = None
932
+
933
+ model_inputs = super().prepare_inputs_for_generation(
934
+ input_ids=input_ids,
935
+ past_key_values=past_key_values,
936
+ attention_mask=attention_mask,
937
+ inputs_embeds=inputs_embeds,
938
+ cache_position=cache_position,
939
+ position_ids=position_ids,
940
+ use_cache=use_cache,
941
+ logits_to_keep=logits_to_keep,
942
+ **kwargs,
943
+ )
944
+ return model_inputs
945
+
946
+
947
+ @add_start_docstrings(
948
+ """
949
+ The Phi3 Model transformer with a sequence classification head on top (linear layer).
950
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
951
+ (e.g. GPT-2) do.
952
+ Since it does classification on the last token, it requires to know the position of the last token. If a
953
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
954
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
955
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
956
+ each row of the batch).
957
+ """,
958
+ PHI3_START_DOCSTRING,
959
+ )
960
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
961
+ def __init__(self, config):
962
+ super().__init__(config)
963
+ self.num_labels = config.num_labels
964
+ self.model = Phi3Model(config)
965
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
966
+
967
+ # Initialize weights and apply final processing
968
+ self.post_init()
969
+
970
+ def get_input_embeddings(self):
971
+ return self.model.embed_tokens
972
+
973
+ def set_input_embeddings(self, value):
974
+ self.model.embed_tokens = value
975
+
976
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
977
+ def forward(
978
+ self,
979
+ input_ids: Optional[torch.LongTensor] = None,
980
+ attention_mask: Optional[torch.Tensor] = None,
981
+ position_ids: Optional[torch.LongTensor] = None,
982
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
983
+ inputs_embeds: Optional[torch.FloatTensor] = None,
984
+ labels: Optional[torch.LongTensor] = None,
985
+ use_cache: Optional[bool] = None,
986
+ output_attentions: Optional[bool] = None,
987
+ output_hidden_states: Optional[bool] = None,
988
+ return_dict: Optional[bool] = None,
989
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
990
+ r"""
991
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
992
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
993
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
994
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
995
+ """
996
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
997
+
998
+ transformer_outputs = self.model(
999
+ input_ids,
1000
+ attention_mask=attention_mask,
1001
+ position_ids=position_ids,
1002
+ past_key_values=past_key_values,
1003
+ inputs_embeds=inputs_embeds,
1004
+ use_cache=use_cache,
1005
+ output_attentions=output_attentions,
1006
+ output_hidden_states=output_hidden_states,
1007
+ return_dict=return_dict,
1008
+ )
1009
+ hidden_states = transformer_outputs[0]
1010
+ logits = self.score(hidden_states)
1011
+
1012
+ if input_ids is not None:
1013
+ batch_size = input_ids.shape[0]
1014
+ else:
1015
+ batch_size = inputs_embeds.shape[0]
1016
+
1017
+ if self.config.pad_token_id is None and batch_size != 1:
1018
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1019
+ if self.config.pad_token_id is None:
1020
+ last_non_pad_token = -1
1021
+ elif input_ids is not None:
1022
+ # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
1023
+ non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
1024
+ token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
1025
+ last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
1026
+ else:
1027
+ last_non_pad_token = -1
1028
+ logger.warning_once(
1029
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1030
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1031
+ )
1032
+
1033
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
1034
+
1035
+ loss = None
1036
+ if labels is not None:
1037
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1038
+
1039
+ if not return_dict:
1040
+ output = (pooled_logits,) + transformer_outputs[1:]
1041
+ return ((loss,) + output) if loss is not None else output
1042
+
1043
+ return SequenceClassifierOutputWithPast(
1044
+ loss=loss,
1045
+ logits=pooled_logits,
1046
+ past_key_values=transformer_outputs.past_key_values,
1047
+ hidden_states=transformer_outputs.hidden_states,
1048
+ attentions=transformer_outputs.attentions,
1049
+ )
1050
+
1051
+
1052
+ @add_start_docstrings(
1053
+ """
1054
+ The Phi3 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1055
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1056
+ """,
1057
+ PHI3_START_DOCSTRING,
1058
+ )
1059
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1060
+ def __init__(self, config):
1061
+ super().__init__(config)
1062
+ self.num_labels = config.num_labels
1063
+ self.model = Phi3Model(config)
1064
+ if getattr(config, "classifier_dropout", None) is not None:
1065
+ classifier_dropout = config.classifier_dropout
1066
+ elif getattr(config, "hidden_dropout", None) is not None:
1067
+ classifier_dropout = config.hidden_dropout
1068
+ else:
1069
+ classifier_dropout = 0.1
1070
+ self.dropout = nn.Dropout(classifier_dropout)
1071
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1072
+
1073
+ # Initialize weights and apply final processing
1074
+ self.post_init()
1075
+
1076
+ def get_input_embeddings(self):
1077
+ return self.model.embed_tokens
1078
+
1079
+ def set_input_embeddings(self, value):
1080
+ self.model.embed_tokens = value
1081
+
1082
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1083
+ @add_code_sample_docstrings(
1084
+ checkpoint=_CHECKPOINT_FOR_DOC,
1085
+ output_type=TokenClassifierOutput,
1086
+ config_class=_CONFIG_FOR_DOC,
1087
+ )
1088
+ def forward(
1089
+ self,
1090
+ input_ids: Optional[torch.LongTensor] = None,
1091
+ attention_mask: Optional[torch.Tensor] = None,
1092
+ position_ids: Optional[torch.LongTensor] = None,
1093
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1094
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1095
+ labels: Optional[torch.LongTensor] = None,
1096
+ use_cache: Optional[bool] = None,
1097
+ output_attentions: Optional[bool] = None,
1098
+ output_hidden_states: Optional[bool] = None,
1099
+ return_dict: Optional[bool] = None,
1100
+ ) -> Union[Tuple, TokenClassifierOutput]:
1101
+ r"""
1102
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1103
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1104
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1105
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1106
+ """
1107
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1108
+
1109
+ outputs = self.model(
1110
+ input_ids,
1111
+ attention_mask=attention_mask,
1112
+ position_ids=position_ids,
1113
+ past_key_values=past_key_values,
1114
+ inputs_embeds=inputs_embeds,
1115
+ use_cache=use_cache,
1116
+ output_attentions=output_attentions,
1117
+ output_hidden_states=output_hidden_states,
1118
+ return_dict=return_dict,
1119
+ )
1120
+ sequence_output = outputs[0]
1121
+ sequence_output = self.dropout(sequence_output)
1122
+ logits = self.score(sequence_output)
1123
+
1124
+ loss = None
1125
+ if labels is not None:
1126
+ loss = self.loss_function(logits, labels, self.config)
1127
+
1128
+ if not return_dict:
1129
+ output = (logits,) + outputs[2:]
1130
+ return ((loss,) + output) if loss is not None else output
1131
+
1132
+ return TokenClassifierOutput(
1133
+ loss=loss,
1134
+ logits=logits,
1135
+ hidden_states=outputs.hidden_states,
1136
+ attentions=outputs.attentions,
1137
+ )
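
The `repeat_kv` helper above documents itself as equivalent to `torch.repeat_interleave(x, dim=1, repeats=n_rep)`, which is how grouped-query attention broadcasts a small set of key/value heads across all query heads. A standalone sketch with arbitrary toy shapes, checking that stated equivalence:

```python
# Toy check of the repeat_kv docstring's equivalence claim; shapes are arbitrary examples.
import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_key_value_heads, seq_len, head_dim) -> (batch, num_key_value_heads * n_rep, seq_len, head_dim)
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

kv = torch.randn(2, 4, 16, 32)                       # 4 key/value heads
expanded = repeat_kv(kv, n_rep=8)                    # -> (2, 32, 16, 32): 32 query heads share them
reference = torch.repeat_interleave(kv, repeats=8, dim=1)
assert torch.equal(expanded, reference)
```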