| """ |
| GLADIUS v2.0 β Nexus Router |
| |
| Routes hidden states to specialists. Top-k routing with load balancing. |
| Specialists run ON the kernel β they are not separate models. |
| |
| STUB: Routes exist but only one specialist (reasoning) is wired. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
|
|
| from .config import KernelConfig |
|
|
|
|
class NexusRouter(nn.Module):
    """
    Learned router that activates top-k specialists per input.

        argmax_specialist S(specialist | hidden_state)

    Specialists run ON the kernel — they are not separate models; this
    module only decides which specialists to activate and how to weight
    their outputs.
    """

    def __init__(self, config: "KernelConfig"):
        """
        Args:
            config: kernel configuration; this module reads
                ``hidden_dim``, ``num_specialists`` and ``router_top_k``.
        """
        super().__init__()
        self.config = config

        # Single linear gate: hidden state -> one logit per specialist.
        # bias=False keeps the gate a pure projection of the hidden state.
        self.gate = nn.Linear(config.hidden_dim, config.num_specialists, bias=False)

        # Coefficient scaling the auxiliary load-balancing loss relative
        # to the main task loss.
        self.balance_coeff = 0.01

    def _specialist_probs(self, hidden: torch.Tensor) -> torch.Tensor:
        """Softmax routing distribution, shape (batch, num_specialists)."""
        return F.softmax(self.gate(hidden), dim=-1)

    def forward(self, hidden: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Select the top-k specialists for each pooled hidden state.

        Args:
            hidden: (batch, hidden_dim) — pooled representation.

        Returns:
            indices: (batch, top_k) int tensor — which specialists to activate.
            weights: (batch, top_k) — mixing weight per selected specialist,
                renormalized so each row sums to 1.
        """
        probs = self._specialist_probs(hidden)
        weights, indices = probs.topk(self.config.router_top_k, dim=-1)

        # Renormalize over the selected k so the mixture weights sum to 1.
        # Softmax probabilities are strictly positive, so the sum is nonzero.
        weights = weights / weights.sum(dim=-1, keepdim=True)

        return indices, weights

    def balance_loss(self, hidden: torch.Tensor) -> torch.Tensor:
        """
        Auxiliary load-balancing loss.

        Penalizes the squared distance between the batch-mean routing
        distribution and the uniform distribution, encouraging equal
        specialist usage across a batch.
        """
        # Mean routing probability per specialist across the batch.
        mean_probs = self._specialist_probs(hidden).mean(dim=0)

        # Target: every specialist used equally often.
        uniform = torch.full_like(mean_probs, 1.0 / self.config.num_specialists)

        return self.balance_coeff * F.mse_loss(mean_probs, uniform)
|
|