sudongpo committed
Commit e96203e · verified · 1 Parent(s): adf3c87

Update modeling_utu

Files changed (1):
  1. modeling_utu.py +886 -0

modeling_utu.py ADDED
@@ -0,0 +1,886 @@
# coding=utf-8
# Copyright 2022 EleutherAI, the HuggingFace Inc. team and Tencent Youtu Lab. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Youtu-Embedding architecture, based on Llama3.2 ##
from functools import partial
from typing import Callable, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
    LossKwargs,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
    replace_return_docstrings,
)
from transformers.utils.deprecation import deprecate_kwarg
from .configuration_utu import UTUConfig

if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from transformers.integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "tencent-youtu/utu-2b"
_CONFIG_FOR_DOC = "UTUConfig"

class UTURMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        UTURMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


ALL_LAYERNORM_LAYERS.append(UTURMSNorm)

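# A minimal numerical sketch of the normalization above: inputs are scaled in
# float32 by the reciprocal root-mean-square over the hidden dimension, then by
# the learned weight (initialized to ones):
#
#     >>> norm = UTURMSNorm(hidden_size=4)
#     >>> x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
#     >>> expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
#     >>> torch.allclose(norm(x), expected)
#     True
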
class UTURotaryEmbedding(nn.Module):
    def __init__(self, config: UTUConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

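# Shape sketch for the two helpers above, with hypothetical sizes (batch=1,
# 8 query heads, 2 key/value heads, seq_len=16, head_dim=64); `rotary_emb`
# stands for a UTURotaryEmbedding built from a matching UTUConfig:
#
#     >>> q = torch.randn(1, 8, 16, 64)
#     >>> k = torch.randn(1, 2, 16, 64)
#     >>> position_ids = torch.arange(16).unsqueeze(0)
#     >>> cos, sin = rotary_emb(q, position_ids)               # each (1, 16, 64)
#     >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # shapes unchanged
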
class UTUMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

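# The block above is the standard Llama-style gated MLP: assuming the usual
# hidden_act="silu" in UTUConfig, it computes
# down_proj(silu(gate_proj(x)) * up_proj(x)), expanding hidden_size to
# intermediate_size and projecting back.
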
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

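# The docstring equivalence for repeat_kv can be checked directly (illustrative
# sizes: 2 key/value heads repeated 3 times for 6 query heads under GQA):
#
#     >>> x = torch.randn(1, 2, 5, 8)
#     >>> torch.equal(repeat_kv(x, 3), torch.repeat_interleave(x, repeats=3, dim=1))
#     True
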
class UTUAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: UTUConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.q_norm = UTURMSNorm(self.head_dim, config.rms_norm_eps)
        self.k_norm = UTURMSNorm(self.head_dim, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

class UTUDecoderLayer(nn.Module):
    def __init__(self, config: UTUConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = UTUAttention(config=config, layer_idx=layer_idx)

        self.mlp = UTUMLP(config)
        self.input_layernorm = UTURMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = UTURMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs

UTU_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`UTUConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

@add_start_docstrings(
    "The bare UTU Model outputting raw hidden-states without any specific head on top.",
    UTU_START_DOCSTRING,
)
class UTUPreTrainedModel(PreTrainedModel):
    config_class = UTUConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["UTUDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def init_weights(self):
        """
        If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any
        initialization logic in `_init_weights`.
        """
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if "-init" in self.name_or_path:
            # Initialize weights
            self.apply(self._initialize_weights)

            # Adjust weights of o_proj in Attention and down_proj in MLP
            for name, module in self.named_modules():
                if "o_proj" in name or "down_proj" in name:
                    # For the output projection, we reinitialize the weights
                    scaled_std = self.config.initializer_range * (1.0 / self.config.num_hidden_layers) ** 0.5
                    module.weight.data.normal_(mean=0.0, std=scaled_std)

        # Tie weights should be skipped when not initializing all weights
        # since from_pretrained(...) calls tie weights anyways
        self.tie_weights()

    def _init_weights(self, module):
        std = self.config.initializer_range
        embedding_std = self.config.embedding_initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=embedding_std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, UTURMSNorm):
            module.weight.data.fill_(1.0)

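# Depth-scaled sketch of the o_proj / down_proj re-initialization above, with
# hypothetical config values initializer_range=0.02 and num_hidden_layers=16:
#
#     >>> 0.02 * (1.0 / 16) ** 0.5
#     0.005
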
UTU_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""

@add_start_docstrings(
    "The bare UTU Model outputting raw hidden-states without any specific head on top.",
    UTU_START_DOCSTRING,
)
class UTUModel(UTUPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`UTUDecoderLayer`]

    Args:
        config: UTUConfig
    """

    def __init__(self, config: UTUConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [UTUDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = UTURMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = UTURotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @add_start_docstrings_to_model_forward(UTU_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    partial(decoder_layer.__call__, **flash_attn_kwargs),
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            if isinstance(attention_mask, BlockMask):
                return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask

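# Mask sketch with hypothetical sizes: a 2D padding mask is expanded to the 4D
# additive form used by eager/SDPA attention, with masked positions set to the
# dtype's minimum value.
#
#     >>> mask_2d = torch.tensor([[0, 1, 1, 1]])  # first token is padding
#     >>> causal_4d = UTUModel._prepare_4d_causal_attention_mask_with_cache_position(
#     ...     mask_2d,
#     ...     sequence_length=4,
#     ...     target_length=4,
#     ...     dtype=torch.float32,
#     ...     device=torch.device("cpu"),
#     ...     cache_position=torch.arange(4),
#     ...     batch_size=1,
#     ... )
#     >>> causal_4d.shape
#     torch.Size([1, 1, 4, 4])
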
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class UTUForCausalLM(UTUPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = UTUModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    @add_start_docstrings_to_model_forward(UTU_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        logits_to_keep (`int` or `torch.Tensor`, *optional*):
            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only
            for that token can save memory, which becomes pretty significant for long sequences or large vocabulary
            size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length
            dimension. This is useful when using packed tensor format (single dimension for batch and sequence length).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, UTUForCausalLM

        >>> model = UTUForCausalLM.from_pretrained("tencent-youtu/utu-2b")
        >>> tokenizer = AutoTokenizer.from_pretrained("tencent-youtu/utu-2b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "UTUForCausalLM",
    "UTUModel",
    "UTUPreTrainedModel",
]
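# Usage sketch, assuming the checkpoint named in `_CHECKPOINT_FOR_DOC` ships this
# file (together with configuration_utu.py) as remote code on the Hub:
#
#     >>> from transformers import AutoModel, AutoTokenizer
#     >>> tokenizer = AutoTokenizer.from_pretrained("tencent-youtu/utu-2b", trust_remote_code=True)
#     >>> model = AutoModel.from_pretrained("tencent-youtu/utu-2b", trust_remote_code=True)
#     >>> inputs = tokenizer("An example sentence to embed.", return_tensors="pt")
#     >>> last_hidden = model(**inputs).last_hidden_state  # (1, seq_len, hidden_size)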