Upload sr_tp_modeling.py

#2
by petil777 - opened
Files changed (1)
  1. sr_tp_modeling.py +314 -0
sr_tp_modeling.py ADDED
@@ -0,0 +1,314 @@
""" PyTorch SRV1 model."""
import sys
import os
from os import path

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
print(sys.path)

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers import AutoTokenizer, AutoConfig
from .configuration_srv1 import SRV1Config

from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)

from .layers import (
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    TensorParallelHead,
    TensorParallelRowLinear,
    load_layer_norm_no_bias,
)
from .dist import initialize_torch_distributed
from .weights import Weights

logger = logging.get_logger(__name__)

# Name of the configuration class referenced when generating docstrings.
_CONFIG_FOR_DOC = "SRV1Config"


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)

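
# Illustrative sketch, not from the original upload: a minimal shape check for the two mask
# helpers above. The helper name and the example sizes are made up; decoder code typically
# builds both masks and adds them together.
def _example_combined_mask(bsz=2, tgt_len=4, past_len=3, dtype=torch.float32):
    causal = _make_causal_mask((bsz, tgt_len), dtype, torch.device("cpu"), past_key_values_length=past_len)
    padding = _expand_mask(torch.ones(bsz, tgt_len + past_len), dtype, tgt_len=tgt_len)
    combined = causal + padding  # both are [bsz, 1, tgt_len, tgt_len + past_len]
    assert combined.shape == (bsz, 1, tgt_len, tgt_len + past_len)
    return combined
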
class SRV1RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        SRV1RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


# Attach the bias-free weight-loading helper from .layers.
SRV1RMSNorm.load_no_bias = load_layer_norm_no_bias

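
# Illustrative sketch, not from the original upload: SRV1RMSNorm normalizes by the root mean
# square over the last dimension, so with the default all-ones weight it reduces to
# x / sqrt(mean(x**2) + eps). The helper name and sizes below are illustrative only.
def _example_rmsnorm_check():
    norm = SRV1RMSNorm(hidden_size=4, eps=1e-6)
    x = torch.randn(2, 3, 4)
    expected = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), expected, atol=1e-6)
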
class SRV1RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.inv_freq = self._create_inv_freq(dim=dim, base=base, device=device)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )

    def _create_inv_freq(self, dim, base, device):
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim))
        return inv_freq


class SRV1LinearScalingRotaryEmbedding(SRV1RotaryEmbedding):
    """SRV1RotaryEmbedding extended with linear position interpolation (rope_scaling type "linear")."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        # scaling_factor must be set before super().__init__(), which builds the cos/sin cache.
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

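
# Illustrative sketch, not from the original upload: how the rotary helpers above are used
# together. Shapes follow the comments in SRV1Attention.forward; the tensor sizes here are
# made-up example values.
def _example_rotary_application():
    bs, num_heads, seq_len, head_dim = 1, 2, 5, 8
    rope = SRV1RotaryEmbedding(dim=head_dim, max_position_embeddings=16)
    q = torch.randn(bs, num_heads, seq_len, head_dim)
    k = torch.randn(bs, num_heads, seq_len, head_dim)
    position_ids = torch.arange(seq_len).unsqueeze(0)  # [bs, seq_len]
    cos, sin = rope(q, seq_len=seq_len)  # each [1, 1, seq_len, head_dim]
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    return q_rot, k_rot
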
class SRV1MLP(nn.Module):
    def __init__(self, prefix, config: SRV1Config, weights):
        super().__init__()
        self.gate_proj = TensorParallelColumnLinear.load(
            config=config, prefix=f"{prefix}.gate_proj", weights=weights, bias=False
        )
        self.up_proj = TensorParallelColumnLinear.load(
            config=config, prefix=f"{prefix}.up_proj", weights=weights, bias=False
        )
        self.down_proj = TensorParallelRowLinear.load(
            config=config, prefix=f"{prefix}.down_proj", weights=weights, bias=False
        )
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

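
# Illustrative sketch, not from the original upload: a quick check that repeat_kv matches the
# torch.repeat_interleave call mentioned in its docstring. Sizes are illustrative only.
def _example_repeat_kv_check():
    x = torch.randn(2, 4, 6, 8)  # (batch, num_key_value_heads, seqlen, head_dim)
    assert torch.equal(repeat_kv(x, 3), x.repeat_interleave(3, dim=1))
    assert repeat_kv(x, 3).shape == (2, 12, 6, 8)
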
class SRV1Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, prefix, config: SRV1Config, weights):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = getattr(config, "rope_theta", 10000)

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        # For 1-D tensor model parallelism: shard hidden size and heads across the process group.
        process_group = weights.process_group
        self.hidden_size = self.hidden_size // process_group.size()
        self.num_heads = self.num_heads // process_group.size()
        self.num_key_value_heads = self.num_key_value_heads // process_group.size()

        self.q_proj = TensorParallelColumnLinear.load(config, prefix=f"{prefix}.q_proj", weights=weights, bias=False)
        self.k_proj = TensorParallelColumnLinear.load(config, prefix=f"{prefix}.k_proj", weights=weights, bias=False)
        self.v_proj = TensorParallelColumnLinear.load(config, prefix=f"{prefix}.v_proj", weights=weights, bias=False)
        self.o_proj = TensorParallelRowLinear.load(config, prefix=f"{prefix}.o_proj", weights=weights, bias=False)
        if self.config.rope_scaling is not None and self.config.rope_scaling['type'] == "linear":
            # Note: do not use weights.device here; the rotary caches should be built on CPU,
            # so the model has to be moved to the current rank afterwards (model.to(cur_rank)).
            self.rotary_emb = SRV1LinearScalingRotaryEmbedding(
                self.head_dim, self.max_position_embeddings, base=self.rope_theta, scaling_factor=self.config.rope_scaling['factor']
            )
        else:
            self.rotary_emb = SRV1RotaryEmbedding(
                self.head_dim, self.max_position_embeddings, base=self.rope_theta
            )

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

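
# Illustrative note, not from the original upload: the tensor-parallel split in
# SRV1Attention.__init__ divides hidden_size, num_heads and num_key_value_heads by
# weights.process_group.size(); q/k/v are column-sharded and o_proj is row-sharded.
# The sizes below are made-up example values.
def _example_tp_attention_split(hidden_size=4096, num_heads=32, num_key_value_heads=8, world_size=4):
    per_rank_hidden_size = hidden_size // world_size  # 1024 features per rank
    per_rank_num_heads = num_heads // world_size  # 8 query heads per rank
    per_rank_num_kv_heads = num_key_value_heads // world_size  # 2 key/value heads per rank
    return per_rank_hidden_size, per_rank_num_heads, per_rank_num_kv_heads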