BarryFutureman committed
Commit c3d40de (verified)
1 Parent(s): 760a697

Upload modeling_ivy.py with huggingface_hub

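The commit message indicates the file was pushed programmatically with the `huggingface_hub` client rather than through the web editor. As a minimal sketch (not taken from this commit), such an upload is typically done with `HfApi.upload_file`; the repo id below is a placeholder, since the target repository is not named on this page.

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token cached by `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="modeling_ivy.py",       # local file to push
        path_in_repo="modeling_ivy.py",          # destination path inside the repo
        repo_id="BarryFutureman/REPO_NAME",      # placeholder: actual repo id not shown here
        commit_message="Upload modeling_ivy.py with huggingface_hub",
    )
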
Files changed (1):
  1. modeling_ivy.py +627 -0
modeling_ivy.py ADDED
@@ -0,0 +1,627 @@
+ from typing import Callable, List, Optional, Tuple, Union
+
+ import torch
+ from torch import nn
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import (
+     LossKwargs,
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging,
+     replace_return_docstrings,
+ )
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class IvyConfig(PretrainedConfig):
+     model_type = "ivy"
+
+     def __init__(
+         self,
+         obs_size=16,
+         out_size=8,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.obs_size = obs_size
+         self.act_size = out_size  # `out_size` is stored as `act_size`: the width of the action head
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window if use_sliding_window else None
+         self.max_window_layers = max_window_layers
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_dropout = attention_dropout
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, move it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ logger = logging.get_logger(__name__)
+ _CONFIG_FOR_DOC = "IvyConfig"
+
+
+ class IvyMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs,
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
+ class IvyAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: IvyConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.is_causal = True
+         self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
+         self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
+         self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
+         self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_value: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         sliding_window = None
+         if (
+             self.config.use_sliding_window
+             and getattr(self.config, "sliding_window", None) is not None
+             and self.layer_idx >= self.config.max_window_layers
+         ):
+             sliding_window = self.config.sliding_window
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+                 logger.warning_once(
+                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                 )
+             else:
+                 attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             sliding_window=sliding_window,  # main diff with Llama
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class IvyRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         IvyRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ class IvyDecoderLayer(nn.Module):
+     def __init__(self, config: IvyConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = IvyAttention(config=config, layer_idx=layer_idx)
+         self.mlp = IvyMLP(config)
+         self.input_layernorm = IvyRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = IvyRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         if config.sliding_window and config._attn_implementation != "flash_attention_2":
+             logger.warning_once(
+                 f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
+                 "unexpected results may be encountered."
+             )
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, self_attn_weights = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         return outputs
+
+
+ class IvyRotaryEmbedding(nn.Module):
+     def __init__(self, config: IvyConfig, device=None):
+         super().__init__()
+         # BC: "rope_type" was originally "type"
+         if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     def _dynamic_frequency_update(self, position_ids, device):
+         """
+         dynamic RoPE layers should recompute `inv_freq` in the following situations:
+         1 - growing beyond the cached sequence length (allow scaling)
+         2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+         """
+         seq_len = torch.max(position_ids) + 1
+         if seq_len > self.max_seq_len_cached:  # growth
+             inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+             self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
+             self.max_seq_len_cached = seq_len
+
+         if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
+             # This .to() is needed if the model has been moved to a device after being initialized (because
+             # the buffer is automatically moved, but not the original copy)
+             self.original_inv_freq = self.original_inv_freq.to(device)
+             self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+             self.max_seq_len_cached = self.original_max_seq_len
+
+     @torch.no_grad()
+     def forward(self, x, position_ids):
+         if "dynamic" in self.rope_type:
+             self._dynamic_frequency_update(position_ids, device=x.device)
+
+         # Core RoPE block
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+         position_ids_expanded = position_ids[:, None, :].float()
+         # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+         device_type = x.device.type
+         device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos()
+             sin = emb.sin()
+
+         # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+         cos = cos * self.attention_scaling
+         sin = sin * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ class IvyPreTrainedModel(PreTrainedModel):
+     config_class = IvyConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["IvyDecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _supports_cache_class = True
+     _supports_quantized_cache = True
+     _supports_static_cache = True
+     _supports_attention_backend = True
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+
+ class Ivy4RL(IvyPreTrainedModel):
+     def __init__(self, config: IvyConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+
+         self.embed_tokens = nn.Linear(config.obs_size, self.config.hidden_size)
+         self.layers = nn.ModuleList(
+             [IvyDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = IvyRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = IvyRotaryEmbedding(config=config)
+
+         self.rl_head = nn.Linear(config.hidden_size, self.config.act_size)
+
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+     ):
+
+         use_cache = False  # caching is always disabled for this policy forward pass
+
+         inputs_embeds = self.embed_tokens(inputs_embeds)  # project the raw observation vectors into the hidden space
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache()
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = self._update_causal_mask(
+             attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     decoder_layer.__call__,
+                     hidden_states,
+                     causal_mask,
+                     position_ids,
+                     past_key_values,
+                     output_attentions,
+                     use_cache,
+                     cache_position,
+                     position_embeddings,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=causal_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_values,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                     cache_position=cache_position,
+                     position_embeddings=position_embeddings,
+                     **flash_attn_kwargs,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         logits = self.rl_head(hidden_states[:, -1:, :])  # read out the action logits from the last position only
+         logits = logits.squeeze(1)
+
+         return logits
+
+     def _update_causal_mask(
+         self,
+         attention_mask: torch.Tensor,
+         input_tensor: torch.Tensor,
+         cache_position: torch.Tensor,
+         past_key_values: Cache,
+         output_attentions: bool,
+     ):
+         if self.config._attn_implementation == "flash_attention_2":
+             if attention_mask is not None and (attention_mask == 0.0).any():
+                 return attention_mask
+             return None
+
+         # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+         # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+         # to infer the attention mask.
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         using_static_cache = isinstance(past_key_values, StaticCache)
+
+         # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+         if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+             if AttentionMaskConverter._ignore_causal_mask_sdpa(
+                 attention_mask,
+                 inputs_embeds=input_tensor,
+                 past_key_values_length=past_seen_tokens,
+                 is_training=self.training,
+             ):
+                 return None
+
+         dtype, device = input_tensor.dtype, input_tensor.device
+         sequence_length = input_tensor.shape[1]
+         if using_static_cache:
+             target_length = past_key_values.get_max_cache_shape()
+         else:
+             target_length = (
+                 attention_mask.shape[-1]
+                 if isinstance(attention_mask, torch.Tensor)
+                 else past_seen_tokens + sequence_length + 1
+             )
+
+         # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+             attention_mask,
+             sequence_length=sequence_length,
+             target_length=target_length,
+             dtype=dtype,
+             device=device,
+             cache_position=cache_position,
+             batch_size=input_tensor.shape[0],
+         )
+
+         if (
+             self.config._attn_implementation == "sdpa"
+             and attention_mask is not None
+             and attention_mask.device.type == "cuda"
+             and not output_attentions
+         ):
+             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+             # Details: https://github.com/pytorch/pytorch/issues/110213
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+         return causal_mask
+
+     @staticmethod
+     def _prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask: torch.Tensor,
+         sequence_length: int,
+         target_length: int,
+         dtype: torch.dtype,
+         device: torch.device,
+         cache_position: torch.Tensor,
+         batch_size: int,
+         **kwargs,
+     ):
+         if attention_mask is not None and attention_mask.dim() == 4:
+             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+             causal_mask = attention_mask
+         else:
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = torch.full(
+                 (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+             )
+             if sequence_length != 1:
+                 causal_mask = torch.triu(causal_mask, diagonal=1)
+             causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+             causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+             if attention_mask is not None:
+                 causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
+                 mask_length = attention_mask.shape[-1]
+                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+                 padding_mask = padding_mask == 0
+                 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+                     padding_mask, min_dtype
+                 )
+
+         return causal_mask
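
Usage sketch (not part of the committed file): `Ivy4RL` reads a batch of observation vectors through `inputs_embeds`, projects them with the linear `embed_tokens`, runs the decoder stack, and returns one action-logit vector per sequence taken from the last position. The snippet below is a shape check under an assumed tiny configuration, and it assumes `modeling_ivy.py` is importable from the working directory; the defaults in `IvyConfig` describe a much larger network.

    import torch
    from modeling_ivy import IvyConfig, Ivy4RL

    # Deliberately small, hypothetical configuration for a quick smoke test.
    config = IvyConfig(
        obs_size=16,
        out_size=8,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=4,
        max_position_embeddings=128,
    )
    model = Ivy4RL(config).eval()

    # Batch of 3 trajectories, 5 observation steps each, every step a vector of width obs_size.
    observations = torch.randn(3, 5, config.obs_size)
    with torch.no_grad():
        logits = model(inputs_embeds=observations)  # input_ids is accepted but ignored by this forward pass

    print(logits.shape)  # expected: torch.Size([3, 8]) -- one action vector per trajectory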