exdysa committed on
Commit 00b7d08 · verified · 1 Parent(s): c6125f6

Delete modeling_sdar.py

Files changed (1)
  1. modeling_sdar.py +0 -895
modeling_sdar.py DELETED
@@ -1,895 +0,0 @@
1
- # SPDX-License-Identifier: MIT
2
- # Adapted from https://huggingface.co/Gen-Verse/TraDo-8B-Instruct/blob/main/modeling_sdar.py
3
- # This file is modified based on https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen3/modeling_qwen3.py.
4
- #
5
-
6
- # coding=utf-8
7
- # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
8
- #
9
- # Licensed under the Apache License, Version 2.0 (the "License");
10
- # you may not use this file except in compliance with the License.
11
- # You may obtain a copy of the License at
12
- #
13
- # http://www.apache.org/licenses/LICENSE-2.0
14
- #
15
- # Unless required by applicable law or agreed to in writing, software
16
- # distributed under the License is distributed on an "AS IS" BASIS,
17
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
- # See the License for the specific language governing permissions and
19
- # limitations under the License.
20
-
21
- from typing import Callable, Optional, Tuple, Union
22
-
23
- from nnll.init_gpu import device
24
- import torch
25
- from torch import nn
26
- import torch.nn.functional as F
27
- from transformers.activations import ACT2FN
28
- from transformers.cache_utils import (
29
- Cache,
30
- DynamicCache,
31
- SlidingWindowCache,
32
- StaticCache,
33
- )
34
- from transformers.generation import GenerationMixin
35
- from transformers.integrations import use_kernel_forward_from_hub
36
- from transformers.modeling_attn_mask_utils import AttentionMaskConverter
37
- from transformers.modeling_layers import GradientCheckpointingLayer
38
- from transformers.modeling_outputs import (
39
- BaseModelOutputWithPast,
40
- CausalLMOutputWithPast,
41
- QuestionAnsweringModelOutput,
42
- SequenceClassifierOutputWithPast,
43
- TokenClassifierOutput,
44
- )
45
- from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
46
- from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
47
- from transformers.processing_utils import Unpack
48
- from transformers.utils import (
49
- auto_docstring,
50
- can_return_tuple,
51
- is_torch_flex_attn_available,
52
- logging,
53
- )
54
-
55
- from divisor.trado.configuration_sdar import SDARConfig
56
-
57
- logger = logging.get_logger(__name__)
58
-
59
- # Make FlashAttentionKwargs available for all devices (used in type hints)
60
- try:
61
- from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
62
- except ImportError:
63
- # Fallback if not available
64
- from typing import TypedDict
65
-
66
- FlashAttentionKwargs = TypedDict("FlashAttentionKwargs", {})
67
-
68
- # Conditionally import flash attention components (CUDA only)
69
- flash_rms_norm = None
70
- flash_attn_func = None
71
- flash_attn_varlen_func = None
72
- index_first_axis = None
73
- pad_input = None
74
- unpad_input = None
75
-
76
- if device.type == "cuda":
77
- try:
78
- from flash_attn.ops.triton.layer_norm import rms_norm_fn as flash_rms_norm
79
- except (ImportError, ModuleNotFoundError):
80
- logger.warning("Flash attention RMS norm not available. Falling back to standard implementation.")
81
- flash_rms_norm = None
82
-
83
- try:
84
- from flash_attn import flash_attn_func, flash_attn_varlen_func
85
- from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
86
- except (ImportError, ModuleNotFoundError):
87
- logger.warning("Flash attention not available. Falling back to standard attention.")
88
- flash_attn_func = None
89
- flash_attn_varlen_func = None
90
-
91
- if is_torch_flex_attn_available():
92
- try:
93
- from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention
94
- from transformers.integrations.flex_attention import make_flex_block_causal_mask
95
- except ImportError:
96
- pass
97
-
98
- try:
99
- from liger_kernel.ops.swiglu import LigerSiLUMulFunction # noqa: F401
100
-
101
- liger_kernel_is_available = True
102
- except ImportError:
103
- liger_kernel_is_available = False
104
-
105
-
106
- @use_kernel_forward_from_hub("RMSNorm")
107
- class SDARRMSNorm(nn.Module):
108
- def __init__(self, hidden_size, eps=1e-6):
109
- """
110
- SDARRMSNorm is equivalent to T5LayerNorm
111
- """
112
- super().__init__()
113
- self.weight = nn.Parameter(torch.ones(hidden_size))
114
- self.variance_epsilon = eps
115
-
116
- def forward(self, hidden_states):
117
- # Use flash RMS norm if available (CUDA only), otherwise fall back to standard implementation
118
- if flash_rms_norm is not None and hidden_states.device.type == "cuda":
119
- try:
120
- return flash_rms_norm(hidden_states, weight=self.weight, bias=None, eps=self.variance_epsilon)
121
- except Exception as e:
122
- logger.warning(f"Flash RMS norm failed ({e}). Falling back to standard implementation.")
123
- # Fall through to standard implementation
124
-
125
- # Standard RMS norm implementation (fallback for MPS, CPU, or when flash_rms_norm fails)
126
- input_dtype = hidden_states.dtype
127
- hidden_states = hidden_states.to(torch.float32)
128
- variance = hidden_states.pow(2).mean(-1, keepdim=True)
129
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
130
- return self.weight * hidden_states.to(input_dtype)
131
-
132
- def extra_repr(self):
133
- return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
134
-
135
-
136
- class SDARMLP(nn.Module):
137
- def __init__(self, config):
138
- super().__init__()
139
- self.config = config
140
- self.hidden_size = config.hidden_size
141
- self.intermediate_size = config.intermediate_size
142
- self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
143
- self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
144
- self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
145
- self.act_fn = ACT2FN[config.hidden_act]
146
-
147
- def forward(self, x):
148
- if liger_kernel_is_available:
149
- return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
150
- else:
151
- down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
152
- return down_proj
153
-
154
-
155
- def rotate_half(x):
156
- """Rotates half the hidden dims of the input."""
157
- x1 = x[..., : x.shape[-1] // 2]
158
- x2 = x[..., x.shape[-1] // 2 :]
159
- return torch.cat((-x2, x1), dim=-1)
160
-
161
-
162
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
163
- """Applies Rotary Position Embedding to the query and key tensors.
164
- Args:
165
- q (`torch.Tensor`): The query tensor.
166
- k (`torch.Tensor`): The key tensor.
167
- cos (`torch.Tensor`): The cosine part of the rotary embedding.
168
- sin (`torch.Tensor`): The sine part of the rotary embedding.
169
- position_ids (`torch.Tensor`, *optional*):
170
- Deprecated and unused.
171
- unsqueeze_dim (`int`, *optional*, defaults to 1):
172
- The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
173
- sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
174
- that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
175
- k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
176
- cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
177
- the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
178
- Returns:
179
- `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
180
- """
181
- cos = cos.unsqueeze(unsqueeze_dim)
182
- sin = sin.unsqueeze(unsqueeze_dim)
183
- q_embed = (q * cos) + (rotate_half(q) * sin)
184
- k_embed = (k * cos) + (rotate_half(k) * sin)
185
- return q_embed, k_embed
186
-
187
-
188
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
189
- """
190
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
191
- num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
192
- """
193
- batch, num_key_value_heads, slen, head_dim = hidden_states.shape
194
- if n_rep == 1:
195
- return hidden_states
196
- hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
197
- return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
198
-
199
-
200
- def eager_attention_forward(
201
- module: nn.Module,
202
- query: torch.Tensor,
203
- key: torch.Tensor,
204
- value: torch.Tensor,
205
- attention_mask: Optional[torch.Tensor],
206
- scaling: float,
207
- dropout: float = 0.0,
208
- **kwargs,
209
- ):
210
- key_states = repeat_kv(key, module.num_key_value_groups)
211
- value_states = repeat_kv(value, module.num_key_value_groups)
212
-
213
- attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
214
- if attention_mask is not None:
215
- causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
216
- attn_weights = attn_weights + causal_mask
217
-
218
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
219
- attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
220
- attn_output = torch.matmul(attn_weights, value_states)
221
- attn_output = attn_output.transpose(1, 2).contiguous()
222
-
223
- return attn_output, attn_weights
224
-
225
-
226
- class SDARAttention(nn.Module):
227
- """Multi-headed attention from 'Attention Is All You Need' paper"""
228
-
229
- def __init__(self, config: SDARConfig, layer_idx: int):
230
- super().__init__()
231
- self.config = config
232
- self.layer_idx = layer_idx
233
- self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
234
- self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
235
- self.scaling = self.head_dim**-0.5
236
- self.attention_dropout = config.attention_dropout
237
- self.is_causal = True
238
-
239
- self.hidden_size = config.hidden_size
240
- self.num_attention_heads = config.num_attention_heads
241
- self.num_key_value_heads = config.num_key_value_heads
242
-
243
- self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
244
- self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
245
- self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
246
- self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
247
- # unlike OLMo, only on the head dim!
248
- self.q_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
249
- # thus post q_norm does not need reshape
250
- self.k_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
251
- self.sliding_window = config.sliding_window
252
- if not (self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers):
253
- self.sliding_window = None
254
-
255
- def forward(
256
- self,
257
- hidden_states: torch.Tensor,
258
- position_embeddings: Tuple[torch.Tensor, torch.Tensor],
259
- attention_mask: Optional[torch.Tensor],
260
- past_key_value: Optional[Cache] = None,
261
- cache_position: Optional[torch.LongTensor] = None,
262
- **kwargs: Unpack[FlashAttentionKwargs],
263
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
264
- input_shape = hidden_states.shape[:-1]
265
- bsz, q_len = input_shape
266
- hidden_shape = (*input_shape, -1, self.head_dim)
267
-
268
- query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
269
- key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
270
- value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
271
-
272
- cos, sin = position_embeddings
273
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
274
-
275
- if past_key_value is not None and kwargs.get("store_kv", False):
276
- # sin and cos are specific to RoPE models; cache_position needed for the static cache
277
- key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
278
- elif past_key_value is not None and not kwargs.get("store_kv", False) and len(past_key_value) > self.layer_idx:
279
- # only retrieve the cached KV, do not store new entries
280
- past_key_states, past_value_states = past_key_value[self.layer_idx]
281
- key_states = torch.cat([past_key_states, key_states], dim=-2)
282
- value_states = torch.cat([past_value_states, value_states], dim=-2)
283
-
284
- """
285
- attention_mask = attention_mask.bool() if attention_mask is not None else None
286
- if torch.all(attention_mask): # decoding
287
- query_states = query_states.transpose(1, 2)
288
- key_states = key_states.transpose(1, 2)
289
- value_states = value_states.transpose(1, 2)
290
- attn_output = flash_attn_func(
291
- query_states,
292
- key_states,
293
- value_states,
294
- causal=False,
295
- softmax_scale=self.scaling
296
- )
297
-
298
- else: # prefilling
299
- attn_output = F.scaled_dot_product_attention(
300
- query=query_states,
301
- key=key_states,
302
- value=value_states,
303
- attn_mask=attention_mask,
304
- is_causal=False,
305
- scale=self.scaling,
306
- enable_gqa=True
307
- )
308
- attn_output = attn_output.transpose(1, 2).contiguous()
309
- """
310
-
311
- # print(query_states.shape, key_states.shape, value_states.shape)
312
-
313
- # --- After RoPE and KV-cache handling, expand KV to all heads ---
314
- key_states = repeat_kv(key_states, self.num_key_value_groups) # [B, H, K, D]
315
- value_states = repeat_kv(value_states, self.num_key_value_groups) # [B, H, K, D]
316
-
317
- # --- Convert a 0/1 or bool 4D mask into an *additive* mask, and align to [B, H, Q, K] ---
318
- attn_mask = None
319
- if attention_mask is not None:
320
- k_len = key_states.shape[-2]
321
- am = attention_mask
322
- # Support either 2D [B, K] or 4D [B, 1/H, Q, K]
323
- if am.dim() == 2:
324
- am = am[:, None, None, :k_len] # -> [B,1,1,K]
325
- else:
326
- am = am[:, :, :, :k_len] # -> [B,1/H,Q,K]
327
-
328
- finfo_min = torch.finfo(query_states.dtype).min
329
- # 0/1 or bool -> float additive mask: 1->0, 0->-inf
330
- if am.dtype == torch.bool:
331
- zero = torch.zeros((), dtype=query_states.dtype, device=am.device)
332
- neginf = torch.full((), finfo_min, dtype=query_states.dtype, device=am.device)
333
- am = torch.where(am, zero, neginf)
334
- else:
335
- # For 0/1 float masks: values > 0 are treated as visible
336
- am = am.to(query_states.dtype)
337
- am = torch.where(am > 0, torch.zeros_like(am), torch.full_like(am, finfo_min))
338
-
339
- # Expand to all heads
340
- # if am.shape[1] == 1 and self.num_attention_heads > 1:
341
- # am = am.expand(am.shape[0], self.num_attention_heads, am.shape[2], am.shape[3])
342
-
343
- # attn_mask = am.contiguous()
344
- attn_mask = am
345
-
346
- bsz, q_len = input_shape
347
-
348
- if q_len == 1 and past_key_value is not None:
349
- # --- Decoding: try flash-attn if available (CUDA only), otherwise fall back to SDPA ---
350
- if flash_attn_func is not None and query_states.device.type == "cuda":
351
- try:
352
- q = query_states.transpose(1, 2) # [B,Q,H,D]
353
- k = key_states.transpose(1, 2)
354
- v = value_states.transpose(1, 2)
355
- attn_output = flash_attn_func(
356
- q,
357
- k,
358
- v,
359
- causal=True, # For decoding, explicitly set causal=True
360
- softmax_scale=self.scaling,
361
- )
362
- attn_output = attn_output.transpose(1, 2).contiguous()
363
- except Exception as e:
364
- logger.warning(f"Flash attention failed during decoding ({e}). Falling back to SDPA.")
365
- # Fall through to SDPA implementation below
366
- attn_output = F.scaled_dot_product_attention(
367
- query=query_states, # [B,H,Q,D]
368
- key=key_states, # [B,H,K,D]
369
- value=value_states, # [B,H,K,D]
370
- attn_mask=attn_mask, # float additive mask
371
- is_causal=False, # All constraints are already encoded in the mask
372
- scale=self.scaling,
373
- )
374
- attn_output = attn_output.transpose(1, 2).contiguous() # -> [B,Q,H,D]
375
- else:
376
- # Fallback to SDPA for MPS, CPU, or when flash_attn_func is not available
377
- attn_output = F.scaled_dot_product_attention(
378
- query=query_states, # [B,H,Q,D]
379
- key=key_states, # [B,H,K,D]
380
- value=value_states, # [B,H,K,D]
381
- attn_mask=attn_mask, # float additive mask
382
- is_causal=False, # All constraints are already encoded in the mask
383
- scale=self.scaling,
384
- )
385
- attn_output = attn_output.transpose(1, 2).contiguous() # -> [B,Q,H,D]
386
- else:
387
- attn_output = F.scaled_dot_product_attention(
388
- query=query_states, # [B,H,Q,D]
389
- key=key_states, # [B,H,K,D]
390
- value=value_states, # [B,H,K,D]
391
- attn_mask=attn_mask, # float additive mask
392
- is_causal=False, # All constraints are already encoded in the mask
393
- scale=self.scaling,
394
- )
395
- attn_output = attn_output.transpose(1, 2).contiguous() # -> [B,Q,H,D]
396
-
397
- attn_output = attn_output.reshape(*input_shape, -1).contiguous()
398
- attn_output = self.o_proj(attn_output)
399
- return attn_output, None # , attn_weights
400
-
401
-
402
- class SDARDecoderLayer(GradientCheckpointingLayer):
403
- def __init__(self, config: SDARConfig, layer_idx: int):
404
- super().__init__()
405
- self.hidden_size = config.hidden_size
406
- self.self_attn = SDARAttention(config=config, layer_idx=layer_idx)
407
- self.mlp = SDARMLP(config)
408
- self.input_layernorm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
409
- self.post_attention_layernorm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
410
- if config.sliding_window and config._attn_implementation != "flash_attention_2": # the only difference from Llama is this warning
411
- logger.warning_once(f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; unexpected results may be encountered.")
412
-
413
- def forward(
414
- self,
415
- hidden_states: torch.Tensor,
416
- attention_mask: Optional[torch.Tensor] = None,
417
- position_ids: Optional[torch.LongTensor] = None,
418
- past_key_value: Optional[Cache] = None,
419
- output_attentions: Optional[bool] = False,
420
- use_cache: Optional[bool] = False,
421
- store_kv: Optional[bool] = False,
422
- cache_position: Optional[torch.LongTensor] = None,
423
- # necessary, but kept here for BC
424
- position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
425
- **kwargs: Unpack[FlashAttentionKwargs],
426
- ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
427
- residual = hidden_states
428
- hidden_states = self.input_layernorm(hidden_states)
429
-
430
- # Self Attention
431
- hidden_states, self_attn_weights = self.self_attn(
432
- hidden_states=hidden_states,
433
- attention_mask=attention_mask,
434
- position_ids=position_ids,
435
- past_key_value=past_key_value,
436
- output_attentions=output_attentions,
437
- use_cache=use_cache,
438
- store_kv=store_kv,
439
- cache_position=cache_position,
440
- position_embeddings=position_embeddings,
441
- **kwargs,
442
- )
443
- hidden_states = residual + hidden_states
444
-
445
- # Fully Connected
446
- residual = hidden_states
447
- hidden_states = self.post_attention_layernorm(hidden_states)
448
- hidden_states = self.mlp(hidden_states)
449
- hidden_states = residual + hidden_states
450
-
451
- outputs = (hidden_states,)
452
- if output_attentions:
453
- outputs += (self_attn_weights,)
454
-
455
- return outputs
456
-
457
-
458
- @auto_docstring
459
- class SDARPreTrainedModel(PreTrainedModel):
460
- config_class = SDARConfig
461
- base_model_prefix = "model"
462
- supports_gradient_checkpointing = True
463
- _no_split_modules = ["SDARDecoderLayer"]
464
- _skip_keys_device_placement = ["past_key_values"]
465
- _supports_flash_attn_2 = True
466
- _supports_sdpa = True
467
- _supports_flex_attn = True
468
- _supports_cache_class = True
469
- _supports_quantized_cache = True
470
- _supports_static_cache = True
471
- _supports_attention_backend = True
472
-
473
- def _init_weights(self, module):
474
- std = self.config.initializer_range
475
- if isinstance(module, nn.Linear):
476
- module.weight.data.normal_(mean=0.0, std=std)
477
- if module.bias is not None:
478
- module.bias.data.zero_()
479
- elif isinstance(module, nn.Embedding):
480
- module.weight.data.normal_(mean=0.0, std=std)
481
- if module.padding_idx is not None:
482
- module.weight.data[module.padding_idx].zero_()
483
- elif isinstance(module, SDARRMSNorm):
484
- module.weight.data.fill_(1.0)
485
-
486
-
487
- class SDARRotaryEmbedding(nn.Module):
488
- def __init__(self, config: SDARConfig, device=None):
489
- super().__init__()
490
- # BC: "rope_type" was originally "type"
491
- if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
492
- self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
493
- else:
494
- self.rope_type = "default"
495
- self.max_seq_len_cached = config.max_position_embeddings
496
- self.original_max_seq_len = config.max_position_embeddings
497
-
498
- self.config = config
499
- self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
500
-
501
- inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
502
- self.register_buffer("inv_freq", inv_freq, persistent=False)
503
- self.original_inv_freq = self.inv_freq
504
-
505
- @torch.no_grad()
506
- # power user: used with advanced RoPE types (e.g. dynamic rope)
507
- @dynamic_rope_update
508
- def forward(self, x, position_ids):
509
- inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
510
- position_ids_expanded = position_ids[:, None, :].float()
511
-
512
- device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
513
- with torch.autocast(device_type=device_type, enabled=False): # Force float32
514
- freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
515
- emb = torch.cat((freqs, freqs), dim=-1)
516
- cos = emb.cos() * self.attention_scaling
517
- sin = emb.sin() * self.attention_scaling
518
-
519
- return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
520
-
521
-
522
- @auto_docstring
523
- class SDARModel(SDARPreTrainedModel):
524
- def __init__(self, config: SDARConfig):
525
- super().__init__(config)
526
- self.padding_idx = config.pad_token_id
527
- self.vocab_size = config.vocab_size
528
-
529
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
530
- self.layers = nn.ModuleList([SDARDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
531
- self.norm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
532
- self.rotary_emb = SDARRotaryEmbedding(config=config)
533
- self.gradient_checkpointing = False
534
-
535
- # Initialize weights and apply final processing
536
- self.post_init()
537
-
538
- def get_input_embeddings(self):
539
- return self.embed_tokens
540
-
541
- def set_input_embeddings(self, value):
542
- self.embed_tokens = value
543
-
544
- @can_return_tuple
545
- @auto_docstring
546
- def forward(
547
- self,
548
- input_ids: Optional[torch.LongTensor] = None,
549
- attention_mask: Optional[torch.Tensor] = None,
550
- position_ids: Optional[torch.LongTensor] = None,
551
- past_key_values: Optional[Cache] = None,
552
- inputs_embeds: Optional[torch.FloatTensor] = None,
553
- use_cache: Optional[bool] = None,
554
- store_kv: Optional[bool] = None,
555
- output_attentions: Optional[bool] = None,
556
- output_hidden_states: Optional[bool] = None,
557
- cache_position: Optional[torch.LongTensor] = None,
558
- **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
559
- ) -> BaseModelOutputWithPast:
560
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
561
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
562
- use_cache = use_cache if use_cache is not None else self.config.use_cache
563
-
564
- if (input_ids is None) ^ (inputs_embeds is not None):
565
- raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
566
-
567
- if self.gradient_checkpointing and self.training and use_cache:
568
- logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.")
569
- use_cache = False
570
-
571
- # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
572
- if not isinstance(past_key_values, (type(None), Cache)):
573
- raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
574
-
575
- if inputs_embeds is None:
576
- inputs_embeds = self.embed_tokens(input_ids)
577
-
578
- if use_cache and past_key_values is None:
579
- past_key_values = DynamicCache()
580
-
581
- if cache_position is None:
582
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
583
- cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
584
-
585
- if position_ids is None:
586
- position_ids = cache_position.unsqueeze(0)
587
-
588
- # causal_mask = self._update_causal_mask(
589
- # attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
590
- # )
591
-
592
- hidden_states = inputs_embeds
593
-
594
- # create position embeddings to be shared across the decoder layers
595
- position_embeddings = self.rotary_emb(hidden_states, position_ids)
596
-
597
- # decoder layers
598
- all_hidden_states = () if output_hidden_states else None
599
- all_self_attns = () if output_attentions else None
600
-
601
- for decoder_layer in self.layers[: self.config.num_hidden_layers]:
602
- if output_hidden_states:
603
- all_hidden_states += (hidden_states,)
604
-
605
- layer_outputs = decoder_layer(
606
- hidden_states,
607
- attention_mask=attention_mask,
608
- position_ids=position_ids,
609
- past_key_value=past_key_values,
610
- output_attentions=output_attentions,
611
- use_cache=use_cache,
612
- store_kv=store_kv,
613
- cache_position=cache_position,
614
- position_embeddings=position_embeddings,
615
- **flash_attn_kwargs,
616
- )
617
-
618
- hidden_states = layer_outputs[0]
619
-
620
- if output_attentions:
621
- all_self_attns += (layer_outputs[1],)
622
-
623
- hidden_states = self.norm(hidden_states)
624
-
625
- # add hidden states from the last decoder layer
626
- if output_hidden_states:
627
- all_hidden_states += (hidden_states,)
628
-
629
- return BaseModelOutputWithPast(
630
- last_hidden_state=hidden_states,
631
- past_key_values=past_key_values if use_cache else None,
632
- hidden_states=all_hidden_states,
633
- attentions=all_self_attns,
634
- )
635
-
636
- def _update_causal_mask(
637
- self,
638
- attention_mask: Union[torch.Tensor, "BlockMask"],
639
- input_tensor: torch.Tensor,
640
- cache_position: torch.Tensor,
641
- past_key_values: Cache,
642
- output_attentions: bool = False,
643
- ):
644
- if self.config._attn_implementation == "flash_attention_2":
645
- if attention_mask is not None and past_key_values is not None:
646
- is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
647
- if is_padding_right:
648
- raise ValueError(
649
- "You are attempting to perform batched generation with padding_side='right'"
650
- " this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
651
- " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
652
- )
653
- if attention_mask is not None and 0.0 in attention_mask:
654
- return attention_mask
655
- return None
656
- if self.config._attn_implementation == "flex_attention":
657
- if isinstance(attention_mask, torch.Tensor):
658
- seq_len_q, seq_len_kv = attention_mask.shape
659
- assert seq_len_q == seq_len_kv, f"got {attention_mask.shape=}"
660
- attention_mask = create_block_mask(
661
- # 2d bool tensor, shape: [2*seqlen, 2*seqlen]
662
- lambda b, h, q_idx, kv_idx: attention_mask[q_idx, kv_idx],
663
- B=None,
664
- H=None,
665
- Q_LEN=seq_len_q,
666
- KV_LEN=seq_len_kv,
667
- )
668
- else:
669
- # Here we pass in flex mask computed externally
670
- assert isinstance(attention_mask, BlockMask)
671
- return attention_mask
672
-
673
- # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
674
- # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
675
- # to infer the attention mask.
676
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
677
- using_static_cache = isinstance(past_key_values, StaticCache)
678
- using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
679
-
680
- # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
681
- if self.config._attn_implementation == "sdpa" and not (using_static_cache or using_sliding_window_cache) and not output_attentions:
682
- if AttentionMaskConverter._ignore_causal_mask_sdpa(
683
- attention_mask,
684
- inputs_embeds=input_tensor,
685
- past_key_values_length=past_seen_tokens,
686
- sliding_window=self.config.sliding_window,
687
- is_training=self.training,
688
- ):
689
- return None
690
-
691
- dtype = input_tensor.dtype
692
- min_dtype = torch.finfo(dtype).min
693
- sequence_length = input_tensor.shape[1]
694
- # SlidingWindowCache or StaticCache
695
- if using_sliding_window_cache or using_static_cache:
696
- target_length = past_key_values.get_max_cache_shape()
697
- # DynamicCache or no cache
698
- else:
699
- target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
700
-
701
- # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
702
- causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
703
- attention_mask,
704
- sequence_length=sequence_length,
705
- target_length=target_length,
706
- dtype=dtype,
707
- cache_position=cache_position,
708
- batch_size=input_tensor.shape[0],
709
- config=self.config,
710
- past_key_values=past_key_values,
711
- )
712
-
713
- if self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu", "mps"] and not output_attentions:
714
- # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
715
- # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
716
- # Details: https://github.com/pytorch/pytorch/issues/110213
717
- causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
718
-
719
- return causal_mask
720
-
721
- @staticmethod
722
- def _prepare_4d_causal_attention_mask_with_cache_position(
723
- attention_mask: torch.Tensor,
724
- sequence_length: int,
725
- target_length: int,
726
- dtype: torch.dtype,
727
- cache_position: torch.Tensor,
728
- batch_size: int,
729
- config: SDARConfig,
730
- past_key_values: Cache,
731
- ):
732
- """
733
- Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
734
- `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
735
- Args:
736
- attention_mask (`torch.Tensor`):
737
- A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
738
- sequence_length (`int`):
739
- The sequence length being processed.
740
- target_length (`int`):
741
- The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
742
- dtype (`torch.dtype`):
743
- The dtype to use for the 4D attention mask.
744
- cache_position (`torch.Tensor`):
745
- Indices depicting the position of the input sequence tokens in the sequence.
746
- batch_size (`int`):
747
- Batch size.
748
- config (`SDARConfig`):
749
- The model's configuration class
750
- past_key_values (`Cache`):
751
- The cache class that is being used currently to generate
752
- """
753
- if attention_mask is not None and attention_mask.dim() == 4:
754
- # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
755
- causal_mask = attention_mask
756
- else:
757
- min_dtype = torch.finfo(dtype).min
758
- causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
759
- diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
760
- text_config = config.get_text_config()
761
- if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
762
- # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
763
- # the check is needed to verify whether the current checkpoint was trained with a sliding window or not
764
- if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
765
- sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (cache_position.reshape(-1, 1) - text_config.sliding_window)
766
- diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
767
- causal_mask *= diagonal_attend_mask
768
- causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
769
- if attention_mask is not None:
770
- causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
771
- if attention_mask.shape[-1] > target_length:
772
- attention_mask = attention_mask[:, :target_length]
773
- mask_length = attention_mask.shape[-1]
774
- padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
775
- padding_mask = padding_mask == 0
776
- causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
777
- return causal_mask
778
-
779
-
780
- @auto_docstring
781
- class SDARForCausalLM(SDARPreTrainedModel, GenerationMixin):
782
- _tied_weights_keys = ["lm_head.weight"]
783
- _tp_plan = {"lm_head": "colwise_rep"}
784
- _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
785
-
786
- def __init__(self, config):
787
- super().__init__(config)
788
- self.model = SDARModel(config)
789
- self.vocab_size = config.vocab_size
790
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
791
-
792
- # Initialize weights and apply final processing
793
- self.post_init()
794
-
795
- def get_input_embeddings(self):
796
- return self.model.embed_tokens
797
-
798
- def set_input_embeddings(self, value):
799
- self.model.embed_tokens = value
800
-
801
- def get_output_embeddings(self):
802
- return self.lm_head
803
-
804
- def set_output_embeddings(self, new_embeddings):
805
- self.lm_head = new_embeddings
806
-
807
- def set_decoder(self, decoder):
808
- self.model = decoder
809
-
810
- def get_decoder(self):
811
- return self.model
812
-
813
- @can_return_tuple
814
- @auto_docstring
815
- def forward(
816
- self,
817
- input_ids: Optional[torch.LongTensor] = None,
818
- attention_mask: Optional[torch.Tensor] = None,
819
- position_ids: Optional[torch.LongTensor] = None,
820
- past_key_values: Optional[Cache] = None,
821
- inputs_embeds: Optional[torch.FloatTensor] = None,
822
- labels: Optional[torch.LongTensor] = None,
823
- use_cache: Optional[bool] = None,
824
- output_attentions: Optional[bool] = None,
825
- output_hidden_states: Optional[bool] = None,
826
- cache_position: Optional[torch.LongTensor] = None,
827
- logits_to_keep: Union[int, torch.Tensor] = 0,
828
- **kwargs: dict,
829
- ) -> CausalLMOutputWithPast:
830
- r"""
831
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
832
- Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
833
- config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
834
- (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
835
- Example:
836
- ```python
837
- >>> from transformers import AutoTokenizer, SDARForCausalLM
838
- >>> model = SDARForCausalLM.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
839
- >>> tokenizer = AutoTokenizer.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
840
- >>> prompt = "Hey, are you conscious? Can you talk to me?"
841
- >>> inputs = tokenizer(prompt, return_tensors="pt")
842
- >>> # Generate
843
- >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
844
- >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
845
- "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
846
- ```"""
847
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
848
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
849
-
850
- # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
851
- outputs: BaseModelOutputWithPast = self.model(
852
- input_ids=input_ids,
853
- attention_mask=attention_mask,
854
- position_ids=position_ids,
855
- past_key_values=past_key_values,
856
- inputs_embeds=inputs_embeds,
857
- use_cache=use_cache,
858
- output_attentions=output_attentions,
859
- output_hidden_states=output_hidden_states,
860
- cache_position=cache_position,
861
- **kwargs,
862
- )
863
-
864
- hidden_states = outputs.last_hidden_state
865
- # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
866
- slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
867
- hidden_states = hidden_states[:, slice_indices, :].contiguous()
868
- fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
869
- if fuse_linear_and_cross_entropy:
870
- # When using fused_linear_ce_loss, we do not compute the whole logits on HBM
871
- logits = None
872
- else:
873
- logits = self.lm_head(hidden_states)
874
-
875
- loss = None
876
- if labels is not None:
877
- # FusedLinearCrossEntropyLoss is applied via monkey patching during training
878
- # We don't use it during inference
879
- loss_fct = nn.CrossEntropyLoss() # nn.CE
880
- loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
881
-
882
- return CausalLMOutputWithPast(
883
- loss=loss,
884
- logits=logits,
885
- past_key_values=outputs.past_key_values,
886
- hidden_states=outputs.hidden_states,
887
- attentions=outputs.attentions,
888
- )
889
-
890
-
891
- __all__ = [
892
- "SDARForCausalLM",
893
- "SDARModel",
894
- "SDARPreTrainedModel",
895
- ]