from __future__ import annotations

from typing import Optional, Tuple, Any, Dict

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast

from configuration_eve import EveConfig

class RMSNorm(nn.Module):
    """Root-mean-square LayerNorm: rescales by the RMS of the features, with no mean-centering and no bias."""

    def __init__(self, dim: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) * self.weight

def precompute_rope_freqs(
    head_dim: int,
    max_seq_len: int,
    theta: float = 10000.0,
    device: Optional[torch.device] = None,
) -> torch.Tensor:
    """Precompute complex RoPE frequencies as cis values."""
    freqs = 1.0 / (theta ** (torch.arange(0, head_dim, 2, device=device).float() / head_dim))
    t = torch.arange(max_seq_len, device=device).float()
    freqs = torch.outer(t, freqs)
    return torch.polar(torch.ones_like(freqs), freqs)

def apply_rope(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
    """
    x: [B, H, T, D]
    freqs_cis: [T, D/2] complex
    """
    B, H, T, D = x.shape
    x_complex = torch.view_as_complex(x.float().reshape(B, H, T, D // 2, 2))
    freqs_cis = freqs_cis[:T].view(1, 1, T, D // 2)
    x_rotated = x_complex * freqs_cis
    return torch.view_as_real(x_rotated).reshape(B, H, T, D).type_as(x)

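# Shape sanity check for the two RoPE helpers above (a sketch; the numbers are
# illustrative only):
#
#   freqs = precompute_rope_freqs(head_dim=64, max_seq_len=128)  # complex64, [128, 32]
#   q = torch.randn(2, 8, 16, 64)                                # [B, H, T, D]
#   assert apply_rope(q, freqs).shape == q.shape
#
# Rotating q and k by the same position-dependent phase makes their dot product
# depend only on the relative offset between positions, which is the point of RoPE.
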
class MLP(nn.Module):
    """SwiGLU feed-forward: c_proj(silu(w1(x)) * w2(x))."""

    def __init__(self, config: EveConfig, intermediate_size: Optional[int] = None):
        super().__init__()
        hidden_dim = intermediate_size or config.expert_intermediate_size
        self.w1 = nn.Linear(config.n_embd, hidden_dim, bias=False)
        self.w2 = nn.Linear(config.n_embd, hidden_dim, bias=False)
        self.c_proj = nn.Linear(hidden_dim, config.n_embd, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.c_proj(F.silu(self.w1(x)) * self.w2(x))

class SharedMoE(nn.Module):
    """
    Simple top-k MoE:
    - One shared expert always applied
    - N routed experts mixed by router weights
    - Aux loss encourages balanced expert usage (simple squared-mean heuristic)
    """

    def __init__(self, config: EveConfig):
        super().__init__()
        self.config = config
        self.top_k = config.top_k
        self.shared_expert = MLP(config, config.shared_expert_intermediate_size)
        self.experts = nn.ModuleList([MLP(config) for _ in range(config.num_experts)])
        self.router = nn.Linear(config.n_embd, config.num_experts, bias=False)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        B, T, C = x.shape
        if self.top_k < 1 or self.top_k > self.config.num_experts:
            raise ValueError(f"Invalid MoE top_k={self.top_k}; must be in [1, {self.config.num_experts}]")

        shared_out = self.shared_expert(x)

        logits = self.router(x)
        probs = F.softmax(logits, dim=-1)
        top_k_weights, top_k_indices = torch.topk(probs, self.top_k, dim=-1)
        top_k_weights = top_k_weights / top_k_weights.sum(dim=-1, keepdim=True)

        # Load-balancing auxiliary loss: sum of squared mean router probabilities,
        # scaled by num_experts. It is minimized when usage is uniform across experts.
        flat_probs = probs.view(-1, self.config.num_experts)
        expert_usage = flat_probs.mean(dim=0)
        aux_loss = torch.sum(expert_usage * expert_usage) * self.config.num_experts

        routed_out = torch.zeros_like(x)
        flat_x = x.view(-1, C)
        flat_indices = top_k_indices.view(-1, self.top_k)
        flat_weights = top_k_weights.view(-1, self.top_k)

        # Dense dispatch: run each expert on the tokens routed to it and
        # accumulate the weighted outputs back into routed_out.
        for i, expert in enumerate(self.experts):
            mask = flat_indices == i
            batch_idx, rank_idx = torch.where(mask)
            if batch_idx.numel() > 0:
                expert_input = flat_x[batch_idx]
                expert_output = expert(expert_input)
                weight = flat_weights[batch_idx, rank_idx].unsqueeze(-1)
                routed_out.view(-1, C).index_add_(0, batch_idx, expert_output * weight)

        return shared_out + routed_out, aux_loss

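# Routing illustration (a sketch): with num_experts = 8 and top_k = 2, each
# token gets a softmax distribution over all 8 experts, its two largest
# probabilities are renormalized to sum to 1, and only those two experts run
# on that token; the shared expert runs on every token regardless. The Python
# loop above visits every expert once per forward pass, which is simple and
# correct but not the fastest possible dispatch strategy.
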
class CausalSelfAttention(nn.Module):
    def __init__(self, config: EveConfig):
        super().__init__()
        # Assumes n_embd == n_head * head_dim (required by the fused qkv projection below).
        self.n_head = config.n_head
        self.head_dim = config.head_dim
        self.n_embd = config.n_embd

        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)

    def forward(self, x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
        B, T, C = x.shape

        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        q = q.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_head, self.head_dim).transpose(1, 2)

        q = apply_rope(q, freqs_cis)
        k = apply_rope(k, freqs_cis)

        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)

class Block(nn.Module):
    def __init__(self, config: EveConfig):
        super().__init__()
        self.ln_1 = RMSNorm(config.n_embd)
        self.ln_2 = RMSNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.mlp = SharedMoE(config)

    def forward(self, x: torch.Tensor, freqs_cis: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        x = x + self.attn(self.ln_1(x), freqs_cis)
        mlp_out, aux_loss = self.mlp(self.ln_2(x))
        x = x + mlp_out
        return x, aux_loss

class DeepSeekMoE(PreTrainedModel, GenerationMixin):
    config_class = EveConfig
    _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}

    def __init__(self, config: EveConfig):
        super().__init__(config)
        self.config = config

        self.transformer = nn.ModuleDict(
            dict(
                wte=nn.Embedding(config.vocab_size, config.n_embd),
                h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
                ln_f=RMSNorm(config.n_embd),
            )
        )
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Weight tying: the token embedding and the LM head share one matrix.
        self.transformer.wte.weight = self.lm_head.weight

        freqs_cis = precompute_rope_freqs(config.head_dim, config.block_size, config.rope_theta)
        self.register_buffer("freqs_cis", freqs_cis, persistent=False)

        # Initialize weights and apply HF post-processing.
        self.post_init()

        # Greedy decoding by default: clear sampling-only parameters so that
        # `generate` does not warn about top_k/top_p/temperature being set
        # while do_sample is False.
        if hasattr(self, "generation_config") and self.generation_config is not None:
            g = self.generation_config
            if not getattr(g, "do_sample", False):
                if getattr(g, "top_k", 0):
                    g.top_k = None
                if getattr(g, "top_p", 1.0) != 1.0:
                    g.top_p = None
                if getattr(g, "temperature", 1.0) != 1.0:
                    g.temperature = None

    def get_input_embeddings(self) -> nn.Module:
        return self.transformer.wte

    def set_input_embeddings(self, value: nn.Module) -> None:
        self.transformer.wte = value

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, value: nn.Module) -> None:
        self.lm_head = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        idx: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        targets: Optional[torch.LongTensor] = None,
        **kwargs: Any,
    ) -> CausalLMOutputWithPast:
        """
        If labels/targets are provided, computes the *shifted* causal LM loss:
            loss = CE(logits[:, :-1], labels[:, 1:])
        Note: attention_mask is accepted for HF API compatibility but is not
        applied; attention is always dense causal over the full sequence.
        """
        # Accept both the HF-style input_ids/labels names and the idx/targets aliases.
        if idx is None:
            if input_ids is None:
                raise ValueError("Must provide input_ids or idx.")
            idx = input_ids
        if targets is None:
            targets = labels

        B, T = idx.shape
        x = self.transformer.wte(idx)

        total_aux_loss: Optional[torch.Tensor] = None
        freqs_cis = self.freqs_cis.to(x.device)

        # Sum the router auxiliary losses across all layers.
        for block in self.transformer.h:
            x, aux_loss = block(x, freqs_cis[:T])
            total_aux_loss = aux_loss if total_aux_loss is None else (total_aux_loss + aux_loss)

        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)

        loss = None
        if targets is not None:
            if T < 2:
                # Fewer than two tokens: no (input, next-token) pairs exist.
                # cross_entropy over an empty batch with mean reduction returns
                # NaN, so use a graph-connected zero instead.
                loss = logits.sum() * 0.0
            else:
                shift_logits = logits[:, :-1, :].contiguous()
                shift_labels = targets[:, 1:].contiguous()
                loss = F.cross_entropy(
                    shift_logits.view(-1, shift_logits.size(-1)).to(torch.float32),
                    shift_labels.view(-1),
                    ignore_index=-100,
                )

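        # Worked example of the shift: with input_ids = labels = [a, b, c, d],
        # the logits at positions [a, b, c] are scored against targets
        # [b, c, d]; the last position has no next token and is dropped.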
        # Add the router load-balancing term only when a primary loss exists
        # (during generation there are no labels, so loss stays None).
        if loss is not None and total_aux_loss is not None and self.config.router_aux_loss_coef:
            loss = loss + (self.config.router_aux_loss_coef * total_aux_loss)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
        )

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs: Any) -> Dict[str, Any]:
        # No KV cache is implemented, so the full sequence is re-run at every
        # decoding step.
        out = {"input_ids": input_ids}
        if "attention_mask" in kwargs and kwargs["attention_mask"] is not None:
            out["attention_mask"] = kwargs["attention_mask"]
        return out
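

# ---------------------------------------------------------------------------
# Minimal smoke test. A sketch only: it assumes EveConfig (the PretrainedConfig
# subclass in configuration_eve.py) accepts the fields used throughout this
# module as keyword arguments; adjust the values to the real signature.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    cfg = EveConfig(
        vocab_size=256,
        n_embd=64,
        n_layer=2,
        n_head=4,
        head_dim=16,  # n_head * head_dim must equal n_embd
        block_size=32,
        rope_theta=10000.0,
        num_experts=4,
        top_k=2,
        expert_intermediate_size=128,
        shared_expert_intermediate_size=128,
        router_aux_loss_coef=0.01,
    )
    model = DeepSeekMoE(cfg)
    ids = torch.randint(0, cfg.vocab_size, (2, 16))
    out = model(input_ids=ids, labels=ids)
    print("logits:", tuple(out.logits.shape))  # expected: (2, 16, 256)
    print("loss:", float(out.loss))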