# gladius-training / kernel / router.py
# Uploaded by amuzetnoM — GLADIUS training package: kernel + omega + synthase + checkpoint (step 529)
# commit 63e99b4 (verified)
"""
GLADIUS v2.0 β€” Nexus Router
Routes hidden states to specialists. Top-k routing with load balancing.
Specialists run ON the kernel β€” they are not separate models.
STUB: Routes exist but only one specialist (reasoning) is wired.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import KernelConfig
class NexusRouter(nn.Module):
    """
    Learned router that activates the top-k specialists per input.

    Computes a softmax distribution over specialists from a pooled hidden
    state — conceptually argmax_specialist S(specialist | hidden_state) —
    then keeps the k highest-probability specialists and renormalizes
    their weights so they sum to one.
    """

    def __init__(self, config: "KernelConfig"):
        super().__init__()
        self.config = config
        # Router gate: hidden_dim -> num_specialists logits. No bias, as is
        # conventional for MoE-style gating layers.
        self.gate = nn.Linear(config.hidden_dim, config.num_specialists, bias=False)
        # Coefficient scaling the auxiliary load-balancing loss.
        self.balance_coeff = 0.01

    def forward(self, hidden: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Route each pooled hidden state to its top-k specialists.

        Args:
            hidden: (batch, hidden_dim) — pooled representation.

        Returns:
            indices: (batch, top_k) — which specialists to activate.
            weights: (batch, top_k) — mixing weights, renormalized to sum
                to 1 over the top-k dimension.
        """
        logits = self.gate(hidden)  # (B, num_specialists)
        probs = F.softmax(logits, dim=-1)
        # Keep the k most probable specialists per example.
        weights, indices = probs.topk(self.config.router_top_k, dim=-1)
        # Renormalize the selected weights. Softmax outputs are strictly
        # positive, so the denominator is never zero.
        weights = weights / weights.sum(dim=-1, keepdim=True)
        return indices, weights

    def balance_loss(
        self,
        hidden: torch.Tensor,
        logits: "torch.Tensor | None" = None,
    ) -> torch.Tensor:
        """
        Auxiliary load-balancing loss encouraging equal specialist usage.

        Args:
            hidden: (batch, hidden_dim) — pooled representation.
            logits: optional precomputed gate logits of shape
                (batch, num_specialists). Passing them avoids a redundant
                gate forward pass when the caller has already routed this
                batch (e.g. right after calling ``forward``).

        Returns:
            Scalar tensor: ``balance_coeff *`` MSE between the batch-mean
            routing distribution and the uniform distribution.
        """
        if logits is None:
            logits = self.gate(hidden)
        probs = F.softmax(logits, dim=-1)
        # Mean routing probability per specialist across the batch.
        mean_probs = probs.mean(dim=0)
        # Ideal usage is uniform: 1 / num_specialists for every specialist.
        uniform = torch.full_like(mean_probs, 1.0 / self.config.num_specialists)
        # Squared distance (mean) from the uniform distribution.
        return self.balance_coeff * F.mse_loss(mean_probs, uniform)