cr0sh committed
Commit 73f9100 · verified · 1 parent: 991ee72

Upload modeling_afmoe.py with huggingface_hub

Files changed (1)
  1. modeling_afmoe.py +680 -0
modeling_afmoe.py ADDED
@@ -0,0 +1,680 @@
from typing import Callable, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from transformers.activations import ACT2FN
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel, ALL_ATTENTION_FUNCTIONS
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.masking_utils import (
    create_causal_mask,
    create_sliding_window_causal_mask,
)
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.processing_utils import Unpack
from transformers.utils import TransformersKwargs
from transformers.cache_utils import Cache, DynamicCache
from transformers.integrations import use_kernel_forward_from_hub


try:
    from .configuration_afmoe import AfmoeConfig
except ImportError:
    from configuration_afmoe import AfmoeConfig

class AfmoeRotaryEmbedding(nn.Module):

    def __init__(self, config: AfmoeConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float().to(x.device) @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


@use_kernel_forward_from_hub("RMSNorm")
class AfmoeRMSNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float):
        """
        AfmoeRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"

def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
        query.dtype
    )
    attn_weights = nn.functional.dropout(
        attn_weights, p=dropout, training=module.training
    )
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class AfmoeMLP(nn.Module):
    def __init__(self, config, intermediate_size=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = intermediate_size or config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))

class AfmoeTokenChoiceRouter(nn.Module):
    """Token-choice top-K router for MoE routing."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_experts
        self.score_func = config.score_func
        self.route_norm = config.route_norm
        self.route_scale = config.route_scale
        self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)

    def forward(self, hidden_states, expert_bias: Optional[torch.Tensor]):
        _, _, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)

        scores = self.gate(hidden_states)

        # Apply scoring function in float32 for stability
        if self.score_func == "sigmoid":
            scores = torch.sigmoid(scores.to(torch.float32))
        else:
            scores = F.softmax(scores.to(torch.float32), dim=-1)

        if expert_bias is not None:
            _, selected_experts = torch.topk(scores + expert_bias, k=self.top_k, dim=1)
            top_scores = scores.gather(dim=1, index=selected_experts)
        else:
            top_scores, selected_experts = torch.topk(scores, k=self.top_k, dim=1)

        # Normalize weights if using sigmoid
        if self.score_func == "sigmoid" and self.route_norm:
            denominator = top_scores.sum(dim=-1, keepdim=True) + 1e-20
            top_scores = top_scores / denominator

        top_scores = top_scores * self.route_scale
        return top_scores, selected_experts


class AfmoeMoE(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.router = AfmoeTokenChoiceRouter(config)

        self.shared_experts = None
        if config.num_shared_experts > 0:
            self.shared_experts = AfmoeMLP(
                config, config.moe_intermediate_size * config.num_shared_experts
            )
        self.experts = nn.ModuleList(
            [AfmoeMLP(
                config, intermediate_size=config.moe_intermediate_size
            ) for _ in range(config.num_experts)]
        )
        self.expert_bias = nn.Parameter(torch.zeros(config.num_experts, dtype=torch.float32), requires_grad=False)

    def forward(self, hidden_states):
        batch_size, seq_len, hidden_dim = hidden_states.shape
        hidden_states_flat = hidden_states.view(-1, hidden_dim)

        # Get routing decisions
        top_scores, selected_experts = self.router(hidden_states, self.expert_bias)

        # Process through shared experts
        if self.shared_experts is not None:
            shared_output = self.shared_experts(hidden_states_flat)
        else:
            shared_output = torch.zeros_like(hidden_states_flat)

        # Reorder tokens by expert for efficient processing
        token_indices_sorted = torch.argsort(selected_experts.view(-1), stable=True)
        top_scores_sorted = top_scores.view(-1)[token_indices_sorted]
        token_to_expert = selected_experts.view(-1)[token_indices_sorted]
        token_indices_sorted = token_indices_sorted // self.config.num_experts_per_tok

        # Gather input tokens
        token_indices_expanded = token_indices_sorted.unsqueeze(-1).expand(
            -1, hidden_dim
        )
        routed_input = torch.gather(
            hidden_states_flat, dim=0, index=token_indices_expanded
        )

        routed_output = torch.zeros_like(routed_input)
        for expert_id in range(self.config.num_experts):
            mask = token_to_expert == expert_id
            if mask.any():
                expert_input = routed_input[mask]
                expert_out = self.experts[expert_id](expert_input)
                routed_output[mask] = expert_out

        routed_output = (
            routed_output.to(torch.float32) * top_scores_sorted.unsqueeze(-1)
        ).to(hidden_states.dtype)

        # Scatter back to original positions
        output = shared_output.scatter_add(
            dim=0, index=token_indices_expanded, src=routed_output
        )

        return output.view(batch_size, seq_len, hidden_dim)

class AfmoeAttention(nn.Module):
    """Multi-headed attention with local/global pattern and gating."""

    def __init__(self, config: AfmoeConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_heads = config.num_attention_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads

        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_local_attention = config.layer_types[layer_idx] == "sliding_attention"
        self.sliding_window = config.sliding_window if self.is_local_attention else None

        self.q_proj = nn.Linear(
            config.hidden_size, self.num_heads * self.head_dim, bias=False
        )
        self.k_proj = nn.Linear(
            config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False
        )
        self.v_proj = nn.Linear(
            config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False
        )
        self.o_proj = nn.Linear(
            self.num_heads * self.head_dim, config.hidden_size, bias=False
        )

        self.q_norm = AfmoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = AfmoeRMSNorm(self.head_dim, eps=config.rms_norm_eps)

        self.gate_proj = nn.Linear(
            config.hidden_size, self.num_heads * self.head_dim, bias=False
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)
        gate_states = self.gate_proj(hidden_states)

        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # RoPE is only applied on sliding-window (local) layers; global layers skip the rotation
        if self.is_local_attention:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[
                self.config._attn_implementation
            ]

        output, _ = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        # Output gating: element-wise sigmoid gate computed from the block input
        output = output.view(*input_shape, -1).contiguous()
        output = output * F.sigmoid(gate_states)
        return self.o_proj(output)

class AfmoeDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: AfmoeConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.layer_idx = layer_idx

        self.self_attn = AfmoeAttention(config=config, layer_idx=layer_idx)
        self.attention_type = config.layer_types[layer_idx]

        # Dual normalization for attention
        self.input_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        # Dual normalization for FFN
        self.pre_mlp_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        # MoE or dense FFN: the first `num_dense_layers` layers use a dense MLP, the rest use MoE
        self.moe_enabled = layer_idx >= config.num_dense_layers
        if self.moe_enabled:
            self.mlp = AfmoeMoE(config)
        else:
            self.mlp = AfmoeMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states

        # Self Attention with dual normalization
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # FFN with dual normalization (self.mlp is either AfmoeMoE or AfmoeMLP)
        residual = hidden_states
        hidden_states = self.pre_mlp_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states

class AfmoePreTrainedModel(PreTrainedModel):
    config_class = AfmoeConfig
    base_model_prefix = "model"
    _no_split_modules = ["AfmoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _keep_in_fp32_modules = [
        "input_layernorm",
        "post_attention_layernorm",
        "pre_mlp_layernorm",
        "post_mlp_layernorm",
        "q_norm",
        "k_norm",
        "norm",
    ]
    _supports_sdpa = True
    _supports_attention_backend = True
    supports_gradient_checkpointing = True

class AfmoeModel(AfmoePreTrainedModel):
    _no_split_modules = ["AfmoeDecoderLayer"]

    def __init__(self, config: AfmoeConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(
            config.vocab_size, config.hidden_size, self.padding_idx
        )
        self.layers = nn.ModuleList(
            [
                AfmoeDecoderLayer(config, layer_idx)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = AfmoeRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[list[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You must specify exactly one of input_ids or inputs_embeds"
            )

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = (
                past_key_values.get_seq_length() if past_key_values is not None else 0
            )
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
            }
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds

        # Apply muP input scaling if enabled
        if self.config.mup_enabled:
            hidden_states = hidden_states * (self.config.hidden_size**0.5)

        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_value=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )

class AfmoeForCausalLM(AfmoePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = AfmoeModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        token_type_ids: Optional[torch.Tensor] = None,  # will be ignored
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
        outputs: MoeModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits
        slice_indices = (
            slice(-logits_to_keep, None)
            if isinstance(logits_to_keep, int)
            else logits_to_keep
        )
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        return MoeCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )

__all__ = [
    "AfmoeForCausalLM",
    "AfmoeModel",
    "AfmoePreTrainedModel",
]
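
For reference, a minimal usage sketch of how a checkpoint that ships this modeling_afmoe.py (together with the matching configuration_afmoe.py) could be loaded through the transformers auto classes with trust_remote_code. The repository id below is a placeholder, not a published checkpoint.

# Hypothetical usage sketch: the repo id is a placeholder, and the checkpoint is
# assumed to ship modeling_afmoe.py / configuration_afmoe.py as custom code.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-namespace/afmoe-checkpoint"  # placeholder repository id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,  # required so AfmoeForCausalLM above is loaded from the repo
    torch_dtype="auto",
)

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(generated[0], skip_special_tokens=True))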