freeCS-dot-org committed
Commit f6ea40a · verified · 1 Parent(s): d66130e

Create modeling_phi.py

Files changed (1):
  1. modeling_phi.py +943 -0

modeling_phi.py ADDED
@@ -0,0 +1,943 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2022, Tri Dao, trid@cs.stanford.edu.
# Licensed under the BSD 3-Clause License.

from __future__ import annotations

import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from transformers.modeling_outputs import CausalLMOutputWithPast

from .configuration_phi import PhiConfig

try:
    from flash_attn.bert_padding import pad_input, unpad_input
    from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding
    from flash_attn.modules.mha import FlashCrossAttention, FlashSelfAttention
    from flash_attn.ops.fused_dense import FusedDense
except ImportError:
    # `flash-attn` is optional; fall back to the PyTorch implementations below.
    pad_input, unpad_input = None, None
    FlashRotaryEmbedding = None
    FlashSelfAttention, FlashCrossAttention = None, None
    FusedDense = None


@dataclass
class InferenceParams:
    """Inference parameters passed to the model to efficiently calculate
    and store the context during inference.

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/utils/generation.py.

    Args:
        max_seqlen: Maximum sequence length.
        max_batch_size: Maximum batch size.
        seqlen_offset: Sequence length offset.
        batch_size_offset: Batch size offset.
        key_value_memory_dict: Key value memory dictionary.
        lengths_per_sample: Lengths per sample.

    """

    max_seqlen: int = field(metadata={"help": "Maximum sequence length."})

    max_batch_size: int = field(metadata={"help": "Maximum batch size."})

    seqlen_offset: int = field(default=0, metadata={"help": "Sequence length offset."})

    batch_size_offset: int = field(default=0, metadata={"help": "Batch size offset."})

    key_value_memory_dict: Dict[str, Any] = field(
        default_factory=dict, metadata={"help": "Key value memory dictionary."}
    )

    lengths_per_sample: Optional[torch.Tensor] = field(default=None, metadata={"help": "Lengths per sample."})
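
# How this is used at generation time: `prepare_inputs_for_generation` (defined
# further below) creates a single `InferenceParams` for the whole decode loop;
# each layer then reads and writes its slice of `key_value_memory_dict`, keyed
# by `layer_idx`, while `seqlen_offset` advances to the number of tokens that
# are already cached.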


class Embedding(nn.Module):
    """Token embedding with dropout."""

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)

    def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.wte(input_ids)
        hidden_states = self.drop(hidden_states)

        return hidden_states


def _apply_rotary_emb(
    x: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
) -> torch.FloatTensor:
    _, seqlen, _, _ = x.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    x_rot = x[:, :, :, :rotary_dim]
    x_pass = x[:, :, :, rotary_dim:]

    x1, x2 = x_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    x1, x2, c, s = [t.to(dtype=torch.float32) for t in [x1, x2, c, s]]

    x_rot = torch.cat([x1 * c - x2 * s, x1 * s + x2 * c], dim=-1).to(x.dtype)

    return torch.cat([x_rot, x_pass], dim=-1)


def _apply_rotary_emb_kv(
    kv: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
    cos_k: Optional[torch.FloatTensor] = None,
    sin_k: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    _, seqlen, _, _, _ = kv.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    k_rot = kv[:, :, 0, :, :rotary_dim]
    k_pass = kv[:, :, 0, :, rotary_dim:]

    k1, k2 = k_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    k1, k2, c, s = [t.to(dtype=torch.float32) for t in [k1, k2, c, s]]

    k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], dim=-1).to(kv.dtype)

    return torch.cat(
        [
            torch.cat([k_rot, k_pass], dim=-1).unsqueeze(2),
            kv[:, :, 1:2, :, :],
        ],
        dim=2,
    )


def _apply_rotary_emb_qkv(
    qkv: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
    cos_k: Optional[torch.FloatTensor] = None,
    sin_k: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    _, seqlen, _, _, _ = qkv.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    q_rot = qkv[:, :, 0, :, :rotary_dim]
    q_pass = qkv[:, :, 0, :, rotary_dim:]

    k_rot = qkv[:, :, 1, :, :rotary_dim]
    k_pass = qkv[:, :, 1, :, rotary_dim:]

    q1, q2 = q_rot.chunk(2, dim=-1)
    k1, k2 = k_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]]

    q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], dim=-1).to(qkv.dtype)
    k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], dim=-1).to(qkv.dtype)

    return torch.cat(
        [
            torch.cat([q_rot, q_pass], dim=-1).unsqueeze(2),
            torch.cat([k_rot, k_pass], dim=-1).unsqueeze(2),
            qkv[:, :, 2:3, :, :],
        ],
        dim=2,
    )
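
# Math note on the helpers above: for channel pair i, with inverse frequency
# inv_freq[i] = base ** (-2i / rotary_dim), a token at position t is rotated by
# the angle t * inv_freq[i]:
#   x1' = x1 * cos - x2 * sin
#   x2' = x1 * sin + x2 * cos
# i.e. a plain 2-D rotation. Channels beyond `rotary_dim` (the `*_pass` slices)
# are passed through unrotated.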


class RotaryEmbedding(nn.Module):
    """Rotary positional embedding (RoPE).

    Reference:
        RoFormer: Enhanced Transformer with Rotary Position Embedding.
        https://arxiv.org/pdf/2104.09864.pdf.

    """

    def __init__(
        self,
        dim: int,
        base: int = 10000,
        scale_base: Optional[float] = None,
        pos_idx_in_fp32: bool = True,
        max_position_embeddings: int = 2048,
        device: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        if scale_base is not None:
            raise NotImplementedError

        self.dim = dim
        self.base = float(base)
        self.scale_base = scale_base
        self.pos_idx_in_fp32 = pos_idx_in_fp32
        self.max_position_embeddings = max_position_embeddings
        self.device = device

        # Generate and save the inverse frequency buffer (non-trainable)
        inv_freq = self._compute_inv_freq(device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Generate and save the scale buffer (non-trainable)
        scale = (
            (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
            if scale_base is not None
            else None
        )
        self.register_buffer("scale", scale, persistent=False)

        # Initialize cached attributes since ONNX can't rely on dynamic initialization
        self._update_cos_sin_cache(max_position_embeddings, device=device, dtype=torch.float32)

    def _compute_inv_freq(self, device: Optional[str] = None) -> torch.FloatTensor:
        return 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))

    def _update_cos_sin_cache(
        self,
        seqlen: int,
        device: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> None:
        self._seq_len_cached = seqlen

        # fp32 is preferred since the output of `torch.arange` can be quite large
        # and bf16 would lose a lot of precision
        if self.pos_idx_in_fp32:
            t = torch.arange(seqlen, device=device, dtype=torch.float32)
            if self.inv_freq.dtype != torch.float32:
                inv_freq = self._compute_inv_freq(device=device)
            else:
                inv_freq = self.inv_freq
        else:
            t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
            inv_freq = self.inv_freq

        # `torch.outer` is preferred since `torch.einsum` converts from fp32 to fp16 if used with AMP
        freqs = torch.outer(t, inv_freq)
        if self.scale is None:
            self._cos_cached = torch.cos(freqs).to(dtype)
            self._sin_cached = torch.sin(freqs).to(dtype)
        else:
            power = (
                torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2
            ) / self.scale_base
            scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")

            # Force the scale multiplication to happen in fp32
            self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
            self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
            self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
            self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)

    def forward(
        self,
        qkv: torch.Tensor,
        kv: Optional[torch.Tensor] = None,
        seqlen_offset: int = 0,
        **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if (
            self._seq_len_cached < qkv.shape[1] + seqlen_offset
            or self._cos_cached.device != qkv.device
            or self._cos_cached.dtype != qkv.dtype
            or (self.training and self._cos_cached.is_inference())
        ):
            self._update_cos_sin_cache(qkv.shape[1] + seqlen_offset, device=qkv.device, dtype=qkv.dtype)

        if kv is None:
            return _apply_rotary_emb_qkv(
                qkv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )
        else:
            # When `kv` is supplied separately, `qkv` holds only the queries
            q = _apply_rotary_emb(
                qkv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )
            kv = _apply_rotary_emb_kv(
                kv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )

            return q, kv


class MLP(nn.Module):
    """Multi-Layer Perceptron.

    Reference:
        Attention Is All You Need.
        https://arxiv.org/pdf/1706.03762.pdf.

    """

    def __init__(
        self,
        config: PretrainedConfig,
        n_inner: Optional[int] = None,
        act_fn: Optional[str] = None,
    ) -> None:
        super().__init__()

        act_fn = config.activation_function if act_fn is None else act_fn

        n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner
        n_inner = n_inner if n_inner is not None else 4 * config.n_embd

        self.fc1 = nn.Linear(config.n_embd, n_inner)
        self.fc2 = nn.Linear(n_inner, config.n_embd)
        self.act = ACT2FN[act_fn]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)

        return hidden_states


class SelfAttention(nn.Module):
    """Self-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        qkv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen = qkv.shape[0], qkv.shape[1]
        q, k, v = qkv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device)
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
            scores = scores + causal_mask.to(dtype=scores.dtype)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output
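
# Equivalence sketch (assuming PyTorch >= 2.0): the dense causal path above
# computes, up to the finite -10000.0 masking and the fp32 upcast, the same
# result as the fused kernel
#   torch.nn.functional.scaled_dot_product_attention(
#       q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True,
#   ).transpose(1, 2)
# The explicit einsum formulation keeps the mask arithmetic visible.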


class CrossAttention(nn.Module):
    """Cross-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        q: torch.FloatTensor,
        kv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = kv.shape[1]

        if kv.shape[3] != q.shape[2]:
            # MQA / GQA: repeat each key-value head to match the number of query heads
            kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
        k, v = kv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full(
                (batch_size, seqlen_k),
                -10000.0,
                dtype=scores.dtype,
                device=scores.device,
            )
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            rows = rearrange(torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1")
            cols = torch.arange(seqlen_k, device=k.device, dtype=torch.long)
            causal_mask = cols > rows + seqlen_k - seqlen_q

            scores = scores.masked_fill(causal_mask, -10000.0)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output
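
# Worked example of the head-repeat above (illustrative numbers): with 32 query
# heads and 4 key-value heads, g = 32 // 4 = 8, so each KV head is tiled 8
# times and every group of 8 query heads attends to one shared KV head.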


def _find_mha_dims(
    config: PretrainedConfig,
    n_head: Optional[int] = None,
    n_head_kv: Optional[int] = None,
    head_dim: Optional[int] = None,
) -> Tuple[int, int, int]:
    if n_head is None and head_dim is None:
        head_dim = config.n_embd // config.n_head
        n_head = config.n_head
    elif n_head is None or head_dim is None:
        raise ValueError("`n_head` and `head_dim` must both be specified or both be `None`.")

    if n_head_kv is None:
        n_head_kv = getattr(config, "n_head_kv", None) or n_head

    return n_head, n_head_kv, head_dim


def _update_kv_cache(kv: torch.FloatTensor, inference_params: InferenceParams, layer_idx: int) -> torch.FloatTensor:
    num_heads, head_dim = kv.shape[-2:]

    if layer_idx not in inference_params.key_value_memory_dict:
        inference_params.key_value_memory_dict[layer_idx] = torch.empty(
            inference_params.max_batch_size,
            inference_params.max_seqlen,
            2,
            num_heads,
            head_dim,
            dtype=kv.dtype,
            device=kv.device,
        )

    batch_start = inference_params.batch_size_offset
    batch_end = batch_start + kv.shape[0]

    sequence_start = inference_params.seqlen_offset
    sequence_end = sequence_start + kv.shape[1]

    # When the current sequence length is equal to or larger than the maximum sequence length,
    # we need to concatenate the current `kv` with the cached `kv` to expand its length
    if sequence_end >= inference_params.max_seqlen:
        inference_params.key_value_memory_dict[layer_idx] = torch.cat(
            (inference_params.key_value_memory_dict[layer_idx], kv), dim=1
        )

    inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, sequence_start:sequence_end, ...] = kv
    kv = inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, :sequence_end, ...]

    return kv
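
# Cache layout: each layer stores one pre-allocated tensor of shape
# (max_batch_size, max_seqlen, 2, num_kv_heads, head_dim), where index 0/1 on
# the third axis holds keys/values. During decoding, each step writes the new
# token's KV slice at `seqlen_offset` and reads back the prefix [:sequence_end].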


class MHA(nn.Module):
    """Multi-head attention layer."""

    def __init__(
        self,
        config: PretrainedConfig,
        dtype: Optional[torch.dtype] = None,
        device: Optional[str] = None,
        rotary_dim: Optional[int] = None,
        rotary_base: float = 10000.0,
        rotary_scale_base: Optional[float] = None,
        n_head: Optional[int] = None,
        n_head_kv: Optional[int] = None,
        head_dim: Optional[int] = None,
        bias: bool = True,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        layer_idx: Optional[int] = None,
        return_residual: bool = False,
        checkpointing: bool = False,
    ) -> None:
        super().__init__()

        # Rotary embedding
        self.rotary_dim = rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0)
        if self.rotary_dim > 0:
            rotary_cls = FlashRotaryEmbedding if config.flash_rotary else RotaryEmbedding
            if rotary_cls is None:
                rotary_cls = RotaryEmbedding

            rotary_kwargs = {}
            if rotary_cls is RotaryEmbedding:
                rotary_kwargs["max_position_embeddings"] = config.n_positions

            self.rotary_emb = rotary_cls(
                self.rotary_dim,
                base=rotary_base,
                scale_base=rotary_scale_base,
                device=device,
                **rotary_kwargs,
            )

        # QKV and output projections; `Wqkv` packs [q | k | v] along its output dimension
        self.n_head, self.n_head_kv, self.head_dim = _find_mha_dims(
            config, n_head=n_head, n_head_kv=n_head_kv, head_dim=head_dim
        )
        op_size = self.head_dim * (self.n_head + 2 * self.n_head_kv)
        hidden_size = config.n_embd

        linear_cls = FusedDense if config.fused_dense else nn.Linear
        if linear_cls is None:
            linear_cls = nn.Linear

        self.Wqkv = linear_cls(hidden_size, op_size, bias=bias, device=device, dtype=dtype)
        self.out_proj = linear_cls(hidden_size, hidden_size, bias=bias, device=device, dtype=dtype)

        # Attention
        attn_cls = FlashSelfAttention if config.flash_attn else SelfAttention
        if attn_cls is None:
            attn_cls = SelfAttention

        cross_attn_cls = FlashCrossAttention if config.flash_attn else CrossAttention
        if cross_attn_cls is None:
            cross_attn_cls = CrossAttention

        self.inner_attn = attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )
        self.inner_cross_attn = cross_attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )

        self.flash_attn = config.flash_attn and attn_cls is FlashSelfAttention
        self.layer_idx = layer_idx
        self.return_residual = return_residual
        self.checkpointing = checkpointing

    def _forward_self_attn(
        self, x: torch.FloatTensor, key_padding_mask: Optional[torch.BoolTensor]
    ) -> torch.FloatTensor:
        qkv = self.Wqkv(x)
        qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)

        if self.rotary_dim > 0:
            qkv = self.rotary_emb(qkv)

        if self.flash_attn:
            batch_size, seqlen = qkv.shape[0], qkv.shape[1]

            cu_seqlens, max_seqlen = None, None
            if key_padding_mask is not None:
                # If `key_padding_mask` is supplied, we need to unpad the input and retrieve
                # the `cu_seqlens` and `max_seqlen` to be used by `flash-attn`
                qkv, indices, cu_seqlens, max_seqlen = unpad_input(qkv, key_padding_mask)

            if self.checkpointing:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_attn, qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
                )
            else:
                attn_output = self.inner_attn(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen).to(qkv.device)

            # If `key_padding_mask` is supplied, we need to pad the output back to the original shape
            return pad_input(attn_output, indices, batch_size, seqlen) if key_padding_mask is not None else attn_output

        if self.checkpointing:
            return torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, key_padding_mask=key_padding_mask)

        return self.inner_attn(qkv, key_padding_mask=key_padding_mask)

    def _forward_cross_attn(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams],
        key_padding_mask: Optional[torch.BoolTensor],
    ) -> torch.FloatTensor:
        batch_size = x.shape[0]

        qkv = self.Wqkv(x)

        q = qkv[..., : self.n_head * self.head_dim]
        q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)

        kv = qkv[..., self.n_head * self.head_dim :]
        kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)

        seqlen_offset = past_key_values.seqlen_offset if past_key_values is not None else 0
        causal = None if seqlen_offset == 0 else False
        if self.rotary_dim > 0:
            q, kv = self.rotary_emb(q, kv=kv, seqlen_offset=seqlen_offset)

        if past_key_values is not None:
            kv = _update_kv_cache(kv, past_key_values, self.layer_idx)

        if self.flash_attn:
            batch_size, seqlen_q = q.shape[0], q.shape[1]
            seqlen_k = kv.shape[1]

            cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k = (
                None,
                None,
                None,
                None,
            )
            if key_padding_mask is not None:
                kv, _, cu_seqlens_k, max_seqlen_k = unpad_input(kv, key_padding_mask)

                if seqlen_q == 1:
                    key_padding_mask = torch.ones(batch_size, 1, device=q.device)
                elif seqlen_q != seqlen_k:
                    key_padding_mask = key_padding_mask[:, -seqlen_q:]

                q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, key_padding_mask)

            if self.checkpointing:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_cross_attn,
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                )
            else:
                attn_output = self.inner_cross_attn(
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                )

            return (
                pad_input(attn_output, indices_q, batch_size, max_seqlen_q)
                if key_padding_mask is not None
                else attn_output
            )

        if self.checkpointing:
            return torch.utils.checkpoint.checkpoint(
                self.inner_cross_attn,
                q,
                kv,
                key_padding_mask=key_padding_mask,
                causal=causal,
            )

        return self.inner_cross_attn(q, kv, key_padding_mask=key_padding_mask, causal=causal)

    def forward(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        if attention_mask is not None:
            attention_mask = attention_mask.bool()

        # MHA
        if self.n_head == self.n_head_kv:
            if past_key_values is None:
                # If `past_key_values` are not supplied, we run self-attention
                attn_output = self._forward_self_attn(x, attention_mask)
            else:
                # If `past_key_values` are supplied, it means that we might have cached values and
                # could take advantage of cross-attention
                attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)
        # MQA / GQA
        else:
            # Regardless of whether `past_key_values` are supplied, we always use the
            # cross-attention path because the `q` and `kv` head counts might differ
            attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)

        output = rearrange(attn_output, "... h d -> ... (h d)")
        output = self.out_proj(output)

        return output if not self.return_residual else (output, x)


class ParallelBlock(nn.Module):
    """Parallel block.

    This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen).

    """

    def __init__(
        self,
        config: PretrainedConfig,
        block_idx: Optional[int] = None,
    ) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.block_idx = block_idx

        self.mixer = MHA(config, layer_idx=block_idx)
        self.mlp = MLP(config)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.ln(hidden_states)

        attn_outputs = self.mixer(
            hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
        )
        if isinstance(attn_outputs, tuple):
            attn_outputs = attn_outputs[0]

        attn_outputs = self.resid_dropout(attn_outputs)
        feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))

        hidden_states = attn_outputs + feed_forward_hidden_states + residual

        return hidden_states
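
# In symbols, with a single shared LayerNorm (GPT-J / CodeGen style):
#   h_out = h + Dropout(Attn(LN(h))) + Dropout(MLP(LN(h)))
# Attention and MLP read the same normalized input and are summed in parallel,
# rather than being applied sequentially as in the original GPT-2 block.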


class CausalLMHead(nn.Module):
    """Causal Language Modeling head.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.linear = nn.Linear(config.n_embd, config.vocab_size)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.ln(hidden_states)
        logits = self.linear(hidden_states).to(torch.float32)

        return logits


class CausalLMLoss(nn.Module):
    """Causal Language Modeling loss.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, shift_labels: bool = True) -> None:
        super().__init__()

        self.shift_labels = shift_labels
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor:
        if self.shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))

        return loss
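
# Shift example: for a sequence [t0, t1, t2, t3], the logits at positions 0..2
# are scored against labels [t1, t2, t3], so each position predicts the next token.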


class PhiPreTrainedModel(PreTrainedModel):
    """Phi pre-trained model."""

    config_class = PhiConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = False
    _no_split_modules = ["ParallelBlock"]

    def __init__(self, *inputs, **kwargs) -> None:
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module) -> None:
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        if past_key_values is None or not isinstance(past_key_values, InferenceParams):
            past_key_values = InferenceParams(
                max_seqlen=self.config.n_positions,
                max_batch_size=input_ids.shape[0],
                seqlen_offset=0,
                batch_size_offset=0,
                key_value_memory_dict={},
                lengths_per_sample=None,
            )
        else:
            # Assume that `past_key_values` has cached all tokens up to the last token in `input_ids`
            past_key_values.seqlen_offset = input_ids.shape[1] - 1
            input_ids = input_ids[:, -1].unsqueeze(-1)

        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "attention_mask": attention_mask,
        }


class PhiModel(PhiPreTrainedModel):
    """Phi model."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.embd = Embedding(config)
        self.h = nn.ModuleList([ParallelBlock(config, block_idx=i) for i in range(config.n_layer)])
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embd.wte

    def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
        self.embd.wte = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
    ) -> torch.FloatTensor:
        hidden_states = self.embd(input_ids)

        for layer in self.h:
            hidden_states = layer(
                hidden_states,
                past_key_values=past_key_values,
                attention_mask=attention_mask,
            )

        return hidden_states


class PhiForCausalLM(PhiPreTrainedModel):
    """Phi for Causal Language Modeling."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"transformer\.h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.transformer = PhiModel(config)
        self.lm_head = CausalLMHead(config)
        self.loss = CausalLMLoss()

        self.post_init()

    def get_output_embeddings(self) -> nn.Linear:
        return self.lm_head.linear

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.lm_head.linear = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        hidden_states = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask)
        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            loss = self.loss(lm_logits, labels)

        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=past_key_values)
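

# A minimal usage sketch (not part of the modeling code). It assumes this file
# is shipped in a Hugging Face Hub repo alongside `configuration_phi.py` and
# matching weights; the repo id below is a placeholder:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("<hub-repo-id>")
#   model = AutoModelForCausalLM.from_pretrained("<hub-repo-id>", trust_remote_code=True)
#
#   inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
#   outputs = model.generate(**inputs, max_new_tokens=64)
#   print(tokenizer.decode(outputs[0]))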