import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class SparseMoE(nn.Module):
    """Sparse Mixture-of-Experts layer.

    Routes each token to a small subset of expert feed-forward networks, so
    per-token compute stays roughly constant while total parameter count
    grows with num_experts.
    """
    def __init__(self, d_model, num_experts, top_k, routing_algorithm, d_ff):
        super().__init__()
        self.d_model = d_model
        self.num_experts = num_experts
        self.top_k = top_k
        self.routing_algorithm = routing_algorithm

        # Each expert is an independent position-wise feed-forward network.
        self.experts = nn.ModuleList([
            nn.Sequential(
                nn.Linear(d_model, d_ff),
                nn.ReLU(),
                nn.Linear(d_ff, d_model)
            ) for _ in range(num_experts)
        ])

        # Only learned top-k routing needs a gate; hash routing is parameter-free.
        if self.routing_algorithm == 'top_k':
            self.gate = nn.Linear(d_model, num_experts)

        self.load_balancing_loss = 0.0

    def hash_routing(self, x):
        # Parameter-free routing: bucket each token by a hash of its content.
        # Caveat: hashing the embedding sum is content-dependent and drifts as
        # embeddings train; hash-routing papers typically hash stable token IDs.
        token_hashes = x.sum(dim=-1).long().abs()
        expert_indices = token_hashes % self.num_experts
        return F.one_hot(expert_indices, num_classes=self.num_experts).float()

    def top_k_routing(self, x):
        gate_logits = self.gate(x)
        top_k_logits, top_k_indices = torch.topk(gate_logits, self.top_k, dim=-1)
        gate_scores = F.softmax(top_k_logits, dim=-1)

        # Sparse routing matrix: renormalized gate weights at the top-k
        # positions, zero elsewhere.
        router_mask = torch.zeros_like(gate_logits).scatter_(-1, top_k_indices, gate_scores)

        if self.training:
            # Switch-Transformer-style auxiliary loss: num_experts * sum_i
            # f_i * P_i, where f_i is the fraction of token assignments
            # dispatched to expert i and P_i is its mean router probability.
            # It is minimized when the router spreads tokens uniformly.
            probs_per_expert = gate_logits.softmax(dim=-1)
            num_tokens = router_mask.shape[0]
            dispatch_mask = (router_mask > 0).float()
            fraction_tokens_per_expert = dispatch_mask.sum(dim=0) / (num_tokens * self.top_k)
            mean_prob_per_expert = probs_per_expert.mean(dim=0)
            self.load_balancing_loss = self.num_experts * torch.sum(
                fraction_tokens_per_expert * mean_prob_per_expert)

        return router_mask

    def forward(self, x):
        batch_size, seq_len, _ = x.shape
        x_flat = x.reshape(-1, self.d_model)  # (batch * seq_len, d_model)

        if self.routing_algorithm == 'top_k':
            router_output = self.top_k_routing(x_flat)
        elif self.routing_algorithm == 'hash':
            router_output = self.hash_routing(x_flat)
        else:
            raise ValueError(f"Unknown routing algorithm: {self.routing_algorithm}")

        # Dispatch: run each expert only on its assigned tokens, then scatter
        # the gate-weighted outputs back to their original positions.
        final_output = torch.zeros_like(x_flat)
        for i, expert in enumerate(self.experts):
            expert_weights = router_output[:, i]
            active_token_indices = torch.where(expert_weights > 0)[0]
            if active_token_indices.numel() > 0:
                active_tokens = x_flat[active_token_indices]
                expert_out = expert(active_tokens)
                weighted_out = expert_out * expert_weights[active_token_indices].unsqueeze(1)
                final_output.index_add_(0, active_token_indices, weighted_out)

        return final_output.view(batch_size, seq_len, self.d_model)
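
# Illustrative usage of SparseMoE (a minimal sketch; the sizes below are
# arbitrary assumptions, not values from any particular configuration):
#
#   moe = SparseMoE(d_model=64, num_experts=4, top_k=2,
#                   routing_algorithm='top_k', d_ff=256)
#   y = moe(torch.randn(2, 10, 64))   # -> (2, 10, 64), same shape as input
#   aux = moe.load_balancing_loss     # populated only in training mode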


class GroupedQueryAttention(nn.Module):
    """
    Implements Grouped-Query Attention (GQA).
    - MHA is the special case num_kv_heads == num_heads.
    - MQA is the special case num_kv_heads == 1.
    """
    def __init__(self, d_model, num_heads, num_kv_heads):
        super().__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        assert num_heads % num_kv_heads == 0, "num_heads must be divisible by num_kv_heads"

        self.d_model = d_model
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.num_key_value_groups = num_heads // num_kv_heads
        self.d_k = d_model // num_heads

        # K and V project to fewer heads than Q; each K/V head is shared by
        # num_key_value_groups query heads.
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, self.num_kv_heads * self.d_k)
        self.W_v = nn.Linear(d_model, self.num_kv_heads * self.d_k)
        self.W_o = nn.Linear(d_model, d_model)

    def scaled_dot_product_attention(self, Q, K, V, mask=None):
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            # Masked positions get a large negative score so softmax zeroes them.
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
        attn_probs = F.softmax(attn_scores, dim=-1)
        return torch.matmul(attn_probs, V)

    def forward(self, q, k, v, mask=None):
        batch_size = q.size(0)

        # Q: (batch, num_heads, seq, d_k); K, V: (batch, num_kv_heads, seq, d_k).
        Q = self.W_q(q).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        K = self.W_k(k).view(batch_size, -1, self.num_kv_heads, self.d_k).transpose(1, 2)
        V = self.W_v(v).view(batch_size, -1, self.num_kv_heads, self.d_k).transpose(1, 2)

        # Expand the shared K/V heads so every query head has a matching pair.
        if self.num_key_value_groups > 1:
            K = K.repeat_interleave(self.num_key_value_groups, dim=1)
            V = V.repeat_interleave(self.num_key_value_groups, dim=1)

        context = self.scaled_dot_product_attention(Q, K, V, mask)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)

        return self.W_o(context)
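
# Illustrative usage of GroupedQueryAttention (a minimal sketch with assumed
# sizes): 8 query heads share 2 K/V heads, so the K/V projections and the
# inference-time KV cache shrink 4x relative to standard multi-head attention.
#
#   gqa = GroupedQueryAttention(d_model=64, num_heads=8, num_kv_heads=2)
#   out = gqa(x, x, x)                # self-attention; x: (batch, seq, 64)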


class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding (Vaswani et al., 2017)."""
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions: cosine
        self.register_buffer('pe', pe.unsqueeze(0))   # (1, max_len, d_model)

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)
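
# Quick sanity check for PositionalEncoding (illustrative; shapes assumed):
#
#   pe = PositionalEncoding(d_model=64)
#   out = pe(torch.zeros(1, 10, 64))  # each position receives a distinct code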


class EncoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, num_kv_heads, d_ff, num_experts, top_k, routing_algorithm, dropout):
        super().__init__()
        self.self_attn = GroupedQueryAttention(d_model, num_heads, num_kv_heads)
        self.moe_ffn = SparseMoE(d_model, num_experts, top_k, routing_algorithm, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        # Post-LN residual blocks: sublayer -> dropout -> add -> LayerNorm.
        attn_output = self.self_attn(x, x, x, mask)
        x = self.norm1(x + self.dropout(attn_output))

        # The MoE layer replaces the dense feed-forward sublayer.
        moe_output = self.moe_ffn(x)
        x = self.norm2(x + self.dropout(moe_output))
        return x


class DecoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, num_kv_heads, d_ff, num_experts, top_k, routing_algorithm, dropout):
        super().__init__()
        self.self_attn = GroupedQueryAttention(d_model, num_heads, num_kv_heads)
        self.cross_attn = GroupedQueryAttention(d_model, num_heads, num_kv_heads)
        self.moe_ffn = SparseMoE(d_model, num_experts, top_k, routing_algorithm, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, src_mask, tgt_mask):
        # Causally masked self-attention over the target sequence.
        attn_output = self.self_attn(x, x, x, tgt_mask)
        x = self.norm1(x + self.dropout(attn_output))

        # Cross-attention: target queries attend over encoder outputs.
        cross_attn_output = self.cross_attn(x, enc_output, enc_output, src_mask)
        x = self.norm2(x + self.dropout(cross_attn_output))

        moe_output = self.moe_ffn(x)
        x = self.norm3(x + self.dropout(moe_output))
        return x


class MoETransformer(nn.Module):
    def __init__(self, config, vocab_size):
        super().__init__()
        self.config = config
        self.encoder_embedding = nn.Embedding(vocab_size, config['d_model'])
        self.decoder_embedding = nn.Embedding(vocab_size, config['d_model'])
        self.positional_encoding = PositionalEncoding(config['d_model'], config['dropout'])

        self.encoder_layers = nn.ModuleList([
            EncoderLayer(config['d_model'], config['num_heads'], config['num_kv_heads'],
                         config['d_ff'], config['num_experts'], config['top_k'],
                         config['routing_algorithm'], config['dropout'])
            for _ in range(config['num_encoder_layers'])
        ])
        self.decoder_layers = nn.ModuleList([
            DecoderLayer(config['d_model'], config['num_heads'], config['num_kv_heads'],
                         config['d_ff'], config['num_experts'], config['top_k'],
                         config['routing_algorithm'], config['dropout'])
            for _ in range(config['num_decoder_layers'])
        ])

        self.fc_out = nn.Linear(config['d_model'], vocab_size)

    def generate_mask(self, src, tgt, pad_idx):
        # src_mask: (batch, 1, 1, src_len); broadcasts over heads and query positions.
        src_mask = (src != pad_idx).unsqueeze(1).unsqueeze(2)
        # tgt_mask combines padding with a causal (lower-triangular) mask,
        # broadcasting to (batch, 1, tgt_len, tgt_len).
        tgt_pad_mask = (tgt != pad_idx).unsqueeze(1).unsqueeze(2)
        seq_len = tgt.size(1)
        tgt_sub_mask = torch.tril(torch.ones((seq_len, seq_len), device=tgt.device)).bool()
        tgt_mask = tgt_pad_mask & tgt_sub_mask
        return src_mask, tgt_mask

    def forward(self, src, tgt, pad_idx=0):
        src_mask, tgt_mask = self.generate_mask(src, tgt, pad_idx)

        # Embeddings are scaled by sqrt(d_model) before positional encoding,
        # following the original Transformer.
        src_emb = self.positional_encoding(self.encoder_embedding(src) * math.sqrt(self.config['d_model']))
        tgt_emb = self.positional_encoding(self.decoder_embedding(tgt) * math.sqrt(self.config['d_model']))

        enc_output = src_emb
        for layer in self.encoder_layers:
            enc_output = layer(enc_output, src_mask)

        dec_output = tgt_emb
        for layer in self.decoder_layers:
            dec_output = layer(dec_output, enc_output, src_mask, tgt_mask)

        return self.fc_out(dec_output)

    def get_total_load_balancing_loss(self):
        # Sum the auxiliary losses recorded by each MoE layer during the most
        # recent forward pass (0.0 for layers that use hash routing).
        total_loss = 0
        for layer in list(self.encoder_layers) + list(self.decoder_layers):
            total_loss += layer.moe_ffn.load_balancing_loss
        return total_loss

    @torch.no_grad()
    def generate(self, src, max_length, start_symbol, end_symbol=1, pad_idx=0):
        """Greedy decoding. Note: this switches the module into eval mode."""
        self.eval()
        device = next(self.parameters()).device
        src = src.to(device)
        batch_size = src.shape[0]

        # Encode the source once; the decoder is re-run at every step.
        src_mask = (src != pad_idx).unsqueeze(1).unsqueeze(2)
        src_emb = self.positional_encoding(self.encoder_embedding(src) * math.sqrt(self.config['d_model']))
        enc_output = src_emb
        for layer in self.encoder_layers:
            enc_output = layer(enc_output, src_mask)

        # Every sequence starts with the start symbol.
        tgt = torch.full((batch_size, 1), start_symbol, dtype=torch.long, device=device)

        for _ in range(max_length - 1):
            _, tgt_mask = self.generate_mask(src, tgt, pad_idx)
            tgt_emb = self.positional_encoding(self.decoder_embedding(tgt) * math.sqrt(self.config['d_model']))
            dec_output = tgt_emb
            for layer in self.decoder_layers:
                dec_output = layer(dec_output, enc_output, src_mask, tgt_mask)

            # Greedy pick from the last position's logits.
            logits = self.fc_out(dec_output[:, -1])
            next_token = torch.argmax(logits, dim=-1).unsqueeze(1)
            tgt = torch.cat([tgt, next_token], dim=1)

            # Stop once every sequence has emitted the end symbol; the start
            # token is excluded so start_symbol == end_symbol cannot stop early.
            if (tgt[:, 1:] == end_symbol).any(dim=-1).all():
                break

        return tgt
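

if __name__ == "__main__":
    # Minimal end-to-end smoke test (a sketch: every value below is an
    # illustrative assumption, not a recommended hyperparameter).
    config = {
        'd_model': 64, 'num_heads': 8, 'num_kv_heads': 2, 'd_ff': 128,
        'num_experts': 4, 'top_k': 2, 'routing_algorithm': 'top_k',
        'num_encoder_layers': 2, 'num_decoder_layers': 2, 'dropout': 0.1,
    }
    vocab_size = 100
    model = MoETransformer(config, vocab_size)

    # Token IDs 0 and 1 are assumed reserved for padding and EOS here.
    src = torch.randint(2, vocab_size, (2, 12))
    tgt = torch.randint(2, vocab_size, (2, 10))

    # Training-style step: task loss plus a weighted auxiliary MoE loss.
    # (Targets are unshifted and the 0.01 weight is an assumption; a real
    # training loop would shift targets and tune this coefficient.)
    logits = model(src, tgt)  # (2, 10, vocab_size)
    ce_loss = F.cross_entropy(logits.reshape(-1, vocab_size), tgt.reshape(-1))
    loss = ce_loss + 0.01 * model.get_total_load_balancing_loss()
    loss.backward()

    # Greedy decoding (note: generate() leaves the model in eval mode).
    out = model.generate(src, max_length=15, start_symbol=2)
    print(logits.shape, loss.item(), out.shape)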