sudongpo committed on
Commit adf3c87 · verified · 1 Parent(s): 4b6a88b

Delete modeling_utu-liger.py

Files changed (1)
  1. modeling_utu-liger.py +0 -918
modeling_utu-liger.py DELETED
@@ -1,918 +0,0 @@
- # coding=utf-8
- # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from functools import partial
- from typing import Callable, Optional, Tuple, Union
-
- import torch
- import torch.utils.checkpoint
- from torch import nn
-
- from transformers.activations import ACT2FN
- from transformers.cache_utils import Cache, DynamicCache, StaticCache
- from transformers.generation import GenerationMixin
- from transformers.modeling_attn_mask_utils import AttentionMaskConverter
- from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
- from transformers.modeling_outputs import (
-     BaseModelOutputWithPast,
-     CausalLMOutputWithPast,
-     QuestionAnsweringModelOutput,
-     SequenceClassifierOutputWithPast,
-     TokenClassifierOutput,
- )
- from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
- from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
- from transformers.processing_utils import Unpack
- from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
- from transformers.utils import (
-     LossKwargs,
-     add_code_sample_docstrings,
-     add_start_docstrings,
-     add_start_docstrings_to_model_forward,
-     can_return_tuple,
-     is_torch_flex_attn_available,
-     logging,
-     replace_return_docstrings,
- )
- from transformers.utils.deprecation import deprecate_kwarg
- from .configuration_utu import UTUConfig
-
- from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss
- from liger_kernel.transformers.rms_norm import LigerRMSNorm
- from liger_kernel.transformers.rope import liger_rotary_pos_emb
- from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
-
- if is_torch_flex_attn_available():
-     from torch.nn.attention.flex_attention import BlockMask
-
-     from transformers.integrations.flex_attention import make_flex_block_causal_mask
-
-
- logger = logging.get_logger(__name__)
-
- _CHECKPOINT_FOR_DOC = "tencent-youtu/utu-2b"
- _CONFIG_FOR_DOC = "UTUConfig"
-
-
- def fixed_cross_entropy(shift_hidden_states, shift_labels, lm_head_weights, num_items_in_batch=None, ignore_index=-100, **kwargs):
-     reduction = "sum" if num_items_in_batch is not None else "mean"
-     lce = LigerFusedLinearCrossEntropyLoss(reduction=reduction, ignore_index=ignore_index)
-     loss = lce(lm_head_weights, shift_hidden_states, shift_labels)
-     if reduction == "sum":
-         loss = loss / num_items_in_batch
-     return loss
-
-
- def ForCausalLMLoss(
-     hidden_states, labels, lm_head_weights, hidden_size, vocab_size, num_items_in_batch=None, ignore_index=-100, **kwargs
- ):
-     shift_hidden_states = hidden_states[..., :-1, :].contiguous()
-     shift_labels = labels[..., 1:].contiguous()
-
-     # flatten tokens
-     shift_hidden_states = shift_hidden_states.view(-1, hidden_size)
-     shift_labels = shift_labels.view(-1)
-
-     loss = fixed_cross_entropy(shift_hidden_states=shift_hidden_states, shift_labels=shift_labels, lm_head_weights=lm_head_weights,
-                                num_items_in_batch=num_items_in_batch, ignore_index=ignore_index, **kwargs)
-     return loss
-
-
- class UTURMSNorm(nn.Module):
-     def __init__(self, hidden_size, eps=1e-6):
-         """
-         UTURMSNorm is equivalent to T5LayerNorm
-         """
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(hidden_size))
-         self.variance_epsilon = eps
-
-     def forward(self, hidden_states):
-         input_dtype = hidden_states.dtype
-         hidden_states = hidden_states.to(torch.float32)
-         variance = hidden_states.pow(2).mean(-1, keepdim=True)
-         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-         return self.weight * hidden_states.to(input_dtype)
-
-     def extra_repr(self):
-         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
-
-
- # ALL_LAYERNORM_LAYERS.append(UTURMSNorm)
- ALL_LAYERNORM_LAYERS.append(LigerRMSNorm)
-
-
- class UTURotaryEmbedding(nn.Module):
-     def __init__(self, config: UTUConfig, device=None):
-         super().__init__()
-         # BC: "rope_type" was originally "type"
-         if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
-             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
-         else:
-             self.rope_type = "default"
-         self.max_seq_len_cached = config.max_position_embeddings
-         self.original_max_seq_len = config.max_position_embeddings
-
-         self.config = config
-         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
-
-         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
-         self.register_buffer("inv_freq", inv_freq, persistent=False)
-         self.original_inv_freq = self.inv_freq
-
-     @torch.no_grad()
-     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
-     def forward(self, x, position_ids):
-         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
-         position_ids_expanded = position_ids[:, None, :].float()
-
-         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
-         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
-             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
-             emb = torch.cat((freqs, freqs), dim=-1)
-             cos = emb.cos() * self.attention_scaling
-             sin = emb.sin() * self.attention_scaling
-
-         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
-
-
- def rotate_half(x):
-     """Rotates half the hidden dims of the input."""
-     x1 = x[..., : x.shape[-1] // 2]
-     x2 = x[..., x.shape[-1] // 2 :]
-     return torch.cat((-x2, x1), dim=-1)
-
-
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
-     """Applies Rotary Position Embedding to the query and key tensors.
-
-     Args:
-         q (`torch.Tensor`): The query tensor.
-         k (`torch.Tensor`): The key tensor.
-         cos (`torch.Tensor`): The cosine part of the rotary embedding.
-         sin (`torch.Tensor`): The sine part of the rotary embedding.
-         position_ids (`torch.Tensor`, *optional*):
-             Deprecated and unused.
-         unsqueeze_dim (`int`, *optional*, defaults to 1):
-             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
-             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
-             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
-             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
-             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
-             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
-     Returns:
-         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
-     """
-     cos = cos.unsqueeze(unsqueeze_dim)
-     sin = sin.unsqueeze(unsqueeze_dim)
-     q_embed = (q * cos) + (rotate_half(q) * sin)
-     k_embed = (k * cos) + (rotate_half(k) * sin)
-     return q_embed, k_embed
-
-
- class UTUMLP(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.config = config
-         self.hidden_size = config.hidden_size
-         self.intermediate_size = config.intermediate_size
-         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
-         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
-         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
-         self.act_fn = ACT2FN[config.hidden_act]
-
-     def forward(self, x):
-         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-         return down_proj
-
-
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
-     """
-     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
-     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
-     """
-     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
-     if n_rep == 1:
-         return hidden_states
-     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
-     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
-
-
- def eager_attention_forward(
-     module: nn.Module,
-     query: torch.Tensor,
-     key: torch.Tensor,
-     value: torch.Tensor,
-     attention_mask: Optional[torch.Tensor],
-     scaling: float,
-     dropout: float = 0.0,
-     **kwargs,
- ):
-     key_states = repeat_kv(key, module.num_key_value_groups)
-     value_states = repeat_kv(value, module.num_key_value_groups)
-
-     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
-     if attention_mask is not None:
-         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
-         attn_weights = attn_weights + causal_mask
-
-     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
-     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
-     attn_output = torch.matmul(attn_weights, value_states)
-     attn_output = attn_output.transpose(1, 2).contiguous()
-
-     return attn_output, attn_weights
-
-
- class UTUAttention(nn.Module):
-     """Multi-headed attention from 'Attention Is All You Need' paper"""
-
-     def __init__(self, config: UTUConfig, layer_idx: int):
-         super().__init__()
-         self.config = config
-         self.layer_idx = layer_idx
-         self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
-         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
-         self.scaling = self.head_dim**-0.5
-         self.attention_dropout = config.attention_dropout
-         self.is_causal = True
-
-         self.q_proj = nn.Linear(
-             config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
-         )
-         self.k_proj = nn.Linear(
-             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
-         )
-         self.v_proj = nn.Linear(
-             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
-         )
-         self.o_proj = nn.Linear(
-             config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
-         )
-         self.q_norm = LigerRMSNorm(self.head_dim, config.rms_norm_eps)
-         self.k_norm = LigerRMSNorm(self.head_dim, config.rms_norm_eps)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
-         attention_mask: Optional[torch.Tensor],
-         past_key_value: Optional[Cache] = None,
-         cache_position: Optional[torch.LongTensor] = None,
-         **kwargs: Unpack[FlashAttentionKwargs],
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         input_shape = hidden_states.shape[:-1]
-         hidden_shape = (*input_shape, -1, self.head_dim)
-
-         query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
-         key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
-         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
-
-         cos, sin = position_embeddings
-         query_states, key_states = liger_rotary_pos_emb(query_states, key_states, cos, sin)
-
-         if past_key_value is not None:
-             # sin and cos are specific to RoPE models; cache_position needed for the static cache
-             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
-             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
-
-         attention_interface: Callable = eager_attention_forward
-         if self.config._attn_implementation != "eager":
-             if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
-                 logger.warning_once(
-                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
-                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
-                 )
-             else:
-                 attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
-
-         attn_output, attn_weights = attention_interface(
-             self,
-             query_states,
-             key_states,
-             value_states,
-             attention_mask,
-             dropout=0.0 if not self.training else self.attention_dropout,
-             scaling=self.scaling,
-             **kwargs,
-         )
-
-         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
-         attn_output = self.o_proj(attn_output)
-         return attn_output, attn_weights
-
-
- class UTUDecoderLayer(nn.Module):
-     def __init__(self, config: UTUConfig, layer_idx: int):
-         super().__init__()
-         self.hidden_size = config.hidden_size
-
-         self.self_attn = UTUAttention(config=config, layer_idx=layer_idx)
-
-         self.mlp = LigerSwiGLUMLP(config)
-         self.input_layernorm = LigerRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.post_attention_layernorm = LigerRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Cache] = None,
-         output_attentions: Optional[bool] = False,
-         use_cache: Optional[bool] = False,
-         cache_position: Optional[torch.LongTensor] = None,
-         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
-         **kwargs: Unpack[FlashAttentionKwargs],
-     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
-         residual = hidden_states
-
-         hidden_states = self.input_layernorm(hidden_states)
-
-         # Self Attention
-         hidden_states, self_attn_weights = self.self_attn(
-             hidden_states=hidden_states,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_value=past_key_value,
-             output_attentions=output_attentions,
-             use_cache=use_cache,
-             cache_position=cache_position,
-             position_embeddings=position_embeddings,
-             **kwargs,
-         )
-         hidden_states = residual + hidden_states
-
-         # Fully Connected
-         residual = hidden_states
-         hidden_states = self.post_attention_layernorm(hidden_states)
-         hidden_states = self.mlp(hidden_states)
-         hidden_states = residual + hidden_states
-
-         outputs = (hidden_states,)
-         if output_attentions:
-             outputs += (self_attn_weights,)
-
-         return outputs
-
-
- UTU_START_DOCSTRING = r"""
-     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
-     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
-     etc.)
-
-     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
-     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
-     and behavior.
-
-     Parameters:
-         config ([`UTUConfig`]):
-             Model configuration class with all the parameters of the model. Initializing with a config file does not
-             load the weights associated with the model, only the configuration. Check out the
-             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
- """
-
-
- @add_start_docstrings(
-     "The bare UTU Model outputting raw hidden-states without any specific head on top.",
-     UTU_START_DOCSTRING,
- )
- class UTUPreTrainedModel(PreTrainedModel):
-     config_class = UTUConfig
-     base_model_prefix = "model"
-     supports_gradient_checkpointing = True
-     _no_split_modules = ["UTUDecoderLayer"]
-     _skip_keys_device_placement = ["past_key_values"]
-     _supports_flash_attn_2 = True
-     _supports_sdpa = True
-     _supports_flex_attn = True
-     _supports_cache_class = True
-     _supports_quantized_cache = True
-     _supports_static_cache = True
-     _supports_attention_backend = True
-
-     def init_weights(self):
-         """
-         If needed, prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any
-         initialization logic in `_init_weights`.
-         """
-         # Prune heads if needed
-         if self.config.pruned_heads:
-             self.prune_heads(self.config.pruned_heads)
-
-         if "-init" in self.name_or_path:
-             # Initialize weights
-             self.apply(self._initialize_weights)
-
-             # Adjust weights of o_proj in Attention and down_proj in MLP
-             for name, module in self.named_modules():
-                 if "o_proj" in name or "down_proj" in name:
-                     # For the output projection, we reinitialize the weights
-                     scaled_std = self.config.initializer_range * (1.0 / self.config.num_hidden_layers) ** 0.5
-                     module.weight.data.normal_(mean=0.0, std=scaled_std)
-
-         # Tie weights should be skipped when not initializing all weights
-         # since from_pretrained(...) calls tie weights anyways
-         self.tie_weights()
-
-     def _init_weights(self, module):
-         std = self.config.initializer_range
-         embedding_std = self.config.embedding_initializer_range
-         if isinstance(module, nn.Linear):
-             module.weight.data.normal_(mean=0.0, std=std)
-             if module.bias is not None:
-                 module.bias.data.zero_()
-         elif isinstance(module, nn.Embedding):
-             module.weight.data.normal_(mean=0.0, std=embedding_std)
-             if module.padding_idx is not None:
-                 module.weight.data[module.padding_idx].zero_()
-         elif isinstance(module, UTURMSNorm) or isinstance(module, LigerRMSNorm):
-             module.weight.data.fill_(1.0)
-
-
- UTU_INPUTS_DOCSTRING = r"""
-     Args:
-         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
-             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
-             it.
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             [What are input IDs?](../glossary#input-ids)
-         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
-             - 1 for tokens that are **not masked**,
-             - 0 for tokens that are **masked**.
-
-             [What are attention masks?](../glossary#attention-mask)
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
-             `past_key_values`).
-
-             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
-             and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
-             information on the default strategy.
-
-             - 1 indicates the head is **not masked**,
-             - 0 indicates the head is **masked**.
-         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
-             config.n_positions - 1]`.
-
-             [What are position IDs?](../glossary#position-ids)
-         past_key_values (`Cache`, *optional*):
-             Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
-             blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
-             returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
-
-             It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
-
-             If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
-             have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
-             of shape `(batch_size, sequence_length)`.
-         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
-             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
-             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
-             model's internal embedding lookup matrix.
-         use_cache (`bool`, *optional*):
-             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
-             `past_key_values`).
-         output_attentions (`bool`, *optional*):
-             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-             tensors for more detail.
-         output_hidden_states (`bool`, *optional*):
-             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-             more detail.
-         return_dict (`bool`, *optional*):
-             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
-         cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
-             Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
-             this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
-             the complete sequence length.
- """
-
-
- @add_start_docstrings(
-     "The bare UTU Model outputting raw hidden-states without any specific head on top.",
-     UTU_START_DOCSTRING,
- )
- class UTUModel(UTUPreTrainedModel):
-     """
-     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`UTUDecoderLayer`]
-
-     Args:
-         config: UTUConfig
-     """
-
-     def __init__(self, config: UTUConfig):
-         super().__init__(config)
-         self.padding_idx = config.pad_token_id
-         self.vocab_size = config.vocab_size
-
-         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
-         self.layers = nn.ModuleList(
-             [UTUDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
-         )
-         self.norm = LigerRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.rotary_emb = UTURotaryEmbedding(config=config)
-         self.gradient_checkpointing = False
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.embed_tokens = value
-
-     @can_return_tuple
-     @add_start_docstrings_to_model_forward(UTU_INPUTS_DOCSTRING)
-     def forward(
-         self,
-         input_ids: Optional[torch.LongTensor] = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[Cache] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         cache_position: Optional[torch.LongTensor] = None,
-         **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
-     ) -> BaseModelOutputWithPast:
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         use_cache = use_cache if use_cache is not None else self.config.use_cache
-
-         if (input_ids is None) ^ (inputs_embeds is not None):
-             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
-
-         if self.gradient_checkpointing and self.training and use_cache:
-             logger.warning_once(
-                 "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
-             )
-             use_cache = False
-
-         # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
-         if not isinstance(past_key_values, (type(None), Cache)):
-             raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
-
-         if inputs_embeds is None:
-             inputs_embeds = self.embed_tokens(input_ids)
-
-         if use_cache and past_key_values is None:
-             past_key_values = DynamicCache()
-
-         if cache_position is None:
-             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
-             cache_position = torch.arange(
-                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
-             )
-
-         if position_ids is None:
-             position_ids = cache_position.unsqueeze(0)
-
-         causal_mask = self._update_causal_mask(
-             attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
-         )
-
-         hidden_states = inputs_embeds
-
-         # create position embeddings to be shared across the decoder layers
-         position_embeddings = self.rotary_emb(hidden_states, position_ids)
-
-         # decoder layers
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attns = () if output_attentions else None
-
-         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
-             if output_hidden_states:
-                 all_hidden_states += (hidden_states,)
-
-             if self.gradient_checkpointing and self.training:
-                 layer_outputs = self._gradient_checkpointing_func(
-                     partial(decoder_layer.__call__, **flash_attn_kwargs),
-                     hidden_states,
-                     causal_mask,
-                     position_ids,
-                     past_key_values,
-                     output_attentions,
-                     use_cache,
-                     cache_position,
-                     position_embeddings,
-                 )
-             else:
-                 layer_outputs = decoder_layer(
-                     hidden_states,
-                     attention_mask=causal_mask,
-                     position_ids=position_ids,
-                     past_key_value=past_key_values,
-                     output_attentions=output_attentions,
-                     use_cache=use_cache,
-                     cache_position=cache_position,
-                     position_embeddings=position_embeddings,
-                     **flash_attn_kwargs,
-                 )
-
-             hidden_states = layer_outputs[0]
-
-             if output_attentions:
-                 all_self_attns += (layer_outputs[1],)
-
-         hidden_states = self.norm(hidden_states)
-
-         # add hidden states from the last decoder layer
-         if output_hidden_states:
-             all_hidden_states += (hidden_states,)
-
-         return BaseModelOutputWithPast(
-             last_hidden_state=hidden_states,
-             past_key_values=past_key_values if use_cache else None,
-             hidden_states=all_hidden_states,
-             attentions=all_self_attns,
-         )
-
-     def _update_causal_mask(
-         self,
-         attention_mask: torch.Tensor,
-         input_tensor: torch.Tensor,
-         cache_position: torch.Tensor,
-         past_key_values: Cache,
-         output_attentions: bool = False,
-     ):
-         if self.config._attn_implementation == "flash_attention_2":
-             if attention_mask is not None and (attention_mask == 0.0).any():
-                 return attention_mask
-             return None
-         if self.config._attn_implementation == "flex_attention":
-             if isinstance(attention_mask, torch.Tensor):
-                 attention_mask = make_flex_block_causal_mask(attention_mask)
-             if isinstance(attention_mask, BlockMask):
-                 return attention_mask
-
-         # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
-         # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
-         # to infer the attention mask.
-         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
-         using_static_cache = isinstance(past_key_values, StaticCache)
-
-         # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
-         if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
-             if AttentionMaskConverter._ignore_causal_mask_sdpa(
-                 attention_mask,
-                 inputs_embeds=input_tensor,
-                 past_key_values_length=past_seen_tokens,
-                 is_training=self.training,
-             ):
-                 return None
-
-         dtype, device = input_tensor.dtype, input_tensor.device
-         sequence_length = input_tensor.shape[1]
-         if using_static_cache:
-             target_length = past_key_values.get_max_cache_shape()
-         else:
-             target_length = (
-                 attention_mask.shape[-1]
-                 if isinstance(attention_mask, torch.Tensor)
-                 else past_seen_tokens + sequence_length + 1
-             )
-
-         # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
-         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
-             attention_mask,
-             sequence_length=sequence_length,
-             target_length=target_length,
-             dtype=dtype,
-             device=device,
-             cache_position=cache_position,
-             batch_size=input_tensor.shape[0],
-         )
-
-         if (
-             self.config._attn_implementation == "sdpa"
-             and attention_mask is not None
-             and attention_mask.device.type in ["cuda", "xpu"]
-             and not output_attentions
-         ):
-             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
-             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
-             # Details: https://github.com/pytorch/pytorch/issues/110213
-             min_dtype = torch.finfo(dtype).min
-             causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
-
-         return causal_mask
-
-     @staticmethod
-     def _prepare_4d_causal_attention_mask_with_cache_position(
-         attention_mask: torch.Tensor,
-         sequence_length: int,
-         target_length: int,
-         dtype: torch.dtype,
-         device: torch.device,
-         cache_position: torch.Tensor,
-         batch_size: int,
-         **kwargs,
-     ):
-         """
-         Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
-         `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
-
-         Args:
-             attention_mask (`torch.Tensor`):
-                 A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
-                 `(batch_size, 1, query_length, key_value_length)`.
-             sequence_length (`int`):
-                 The sequence length being processed.
-             target_length (`int`):
-                 The target length: when generating with static cache, the mask should be as long as the static cache,
-                 to account for the 0 padding, the part of the cache that is not filled yet.
-             dtype (`torch.dtype`):
-                 The dtype to use for the 4D attention mask.
-             device (`torch.device`):
-                 The device to place the 4D attention mask on.
-             cache_position (`torch.Tensor`):
-                 Indices depicting the position of the input sequence tokens in the sequence.
-             batch_size (`int`):
-                 Batch size.
-         """
-         if attention_mask is not None and attention_mask.dim() == 4:
-             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
-             causal_mask = attention_mask
-         else:
-             min_dtype = torch.finfo(dtype).min
-             causal_mask = torch.full(
-                 (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
-             )
-             if sequence_length != 1:
-                 causal_mask = torch.triu(causal_mask, diagonal=1)
-             causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
-             causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
-             if attention_mask is not None:
-                 causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
-                 mask_length = attention_mask.shape[-1]
-                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
-                     causal_mask.device
-                 )
-                 padding_mask = padding_mask == 0
-                 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
-                     padding_mask, min_dtype
-                 )
-
-         return causal_mask
-
-
- class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
-
-
- class UTUForCausalLM(UTUPreTrainedModel, GenerationMixin):
-     _tied_weights_keys = ["lm_head.weight"]
-     _tp_plan = {"lm_head": "colwise_rep"}
-     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
-
-     def __init__(self, config):
-         super().__init__(config)
-         self.model = UTUModel(config)
-         self.vocab_size = config.vocab_size
-         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     def get_output_embeddings(self):
-         return self.lm_head
-
-     def set_output_embeddings(self, new_embeddings):
-         self.lm_head = new_embeddings
-
-     def set_decoder(self, decoder):
-         self.model = decoder
-
-     def get_decoder(self):
-         return self.model
-
-     @can_return_tuple
-     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
-     @add_start_docstrings_to_model_forward(UTU_INPUTS_DOCSTRING)
-     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
-     def forward(
-         self,
-         input_ids: Optional[torch.LongTensor] = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[Cache] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         cache_position: Optional[torch.LongTensor] = None,
-         logits_to_keep: Union[int, torch.Tensor] = 0,
-         **kwargs: Unpack[KwargsForCausalLM],
-     ) -> CausalLMOutputWithPast:
-         r"""
-         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-         logits_to_keep (`int` or `torch.Tensor`, *optional*):
-             If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
-             `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
-             token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
-             If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
-             This is useful when using packed tensor format (single dimension for batch and sequence length).
-
-         Returns:
-
-         Example:
-
-         ```python
-         >>> from transformers import AutoTokenizer, UTUForCausalLM
-
-         >>> model = UTUForCausalLM.from_pretrained("tencent-youtu/utu-2b")
-         >>> tokenizer = AutoTokenizer.from_pretrained("tencent-youtu/utu-2b")
-
-         >>> prompt = "Hey, are you conscious? Can you talk to me?"
-         >>> inputs = tokenizer(prompt, return_tensors="pt")
-
-         >>> # Generate
-         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
-         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
-         ```"""
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-
-         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-         outputs: BaseModelOutputWithPast = self.model(
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             cache_position=cache_position,
-             **kwargs,
-         )
-
-         hidden_states = outputs.last_hidden_state
-         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
-         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
-         if self.training:
-             logits = None
-         else:
-             logits = self.lm_head(hidden_states[:, slice_indices, :])
-
-         loss = None
-         if labels is not None:
-             loss = ForCausalLMLoss(hidden_states=hidden_states[:, slice_indices, :], labels=labels, lm_head_weights=self.lm_head.weight,
-                                    hidden_size=self.config.hidden_size, vocab_size=self.config.vocab_size, **kwargs)
-
-         return CausalLMOutputWithPast(
-             loss=loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-         )
-
-
- __all__ = [
-     "UTUForCausalLM",
-     "UTUModel",
-     "UTUPreTrainedModel"
- ]