| """ |
| GLADIUS v2.0 β Updated Kernel with Router + Specialist Wiring |
| |
| Changes from original: |
| - SpecialistRegistry imported and instantiated |
| - Router called in forward() between final_norm and tool_cortex |
| - Specialist outputs added as weighted residual |
| - Balance loss returned for training |
| - specialist_residual_scale config for stability |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import math |
| from typing import Optional |
|
|
| from .config import KernelConfig |
| from .embeddings import SharedEmbeddings |
| from .attention import TransformerLayer, RMSNorm |
| from .memory import ThreeTemperatureMemory |
| from .temporal import TimeEngine |
| from .modulator import Modulator |
| from .cognition import CognitionLoop |
| from .tools import ToolCortex |
| from .router import NexusRouter |
|
|
| |
| import sys, os |
| sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) |
| from specialists.specialists import SpecialistRegistry |
|
|
|
|
class GladiusKernel(nn.Module):
    """
    GLADIUS Intelligence Kernel v2.1 — Router + Specialists Wired

    Forward pass:
        1. Embed -> 2. Memory read -> 3. Time encoding -> 4. Transformer layers ->
        5. Final norm -> 5.5 ROUTER + SPECIALISTS -> 6. Tool check ->
        7. Modulate -> 8. Memory write -> 9. Cognition heartbeat
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        self.config = config

        # Core subsystems.
        self.embeddings = SharedEmbeddings(config)
        self.memory = ThreeTemperatureMemory(config)
        self.time_engine = TimeEngine(config)
        self.modulator = Modulator(config)
        self.cognition = CognitionLoop(config)
        self.tool_cortex = ToolCortex(config)
        self.router = NexusRouter(config)

        # Specialist mixture, selected per batch element by the router.
        self.specialist_registry = SpecialistRegistry(config)

        # Scale on the specialist residual; a small default keeps training
        # stable even when the config omits `specialist_residual_scale`.
        self.specialist_scale = getattr(config, 'specialist_residual_scale', 0.1)

        # Transformer trunk.
        self.layers = nn.ModuleList([
            TransformerLayer(config, layer_idx=i)
            for i in range(config.num_layers)
        ])
        self.final_norm = RMSNorm(config.hidden_dim)

        # Precomputed causal mask; register_buffer moves it with the module
        # on .to(device) without treating it as a trainable parameter.
        self.register_buffer(
            'causal_mask',
            torch.tril(torch.ones(1, 1, config.max_seq_len, config.max_seq_len)),
        )

        # Optional multimodal senses — best-effort import so a text-only
        # deployment works even when the senses module is absent or broken.
        self.has_senses = False
        try:
            from .senses import SensoryIntegration
            self.senses = SensoryIntegration(config)
            self.has_senses = True
        except Exception:
            pass

        self._init_weights()

    def _init_weights(self) -> None:
        """Initialize every weight matrix (dim > 1) with Xavier uniform.

        NOTE(review): this runs after all submodules are constructed and
        therefore overwrites any initialization they performed themselves;
        biases and 1-D parameters (e.g. norm gains) are left untouched.
        """
        for name, p in self.named_parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def count_parameters(self) -> dict:
        """Count parameters by direct child component, plus totals.

        Returns:
            dict mapping each child module name to its parameter count,
            with extra keys 'total' and 'trainable'.
        """
        counts = {}
        for name, module in self.named_children():
            n = sum(p.numel() for p in module.parameters())
            counts[name] = n
        counts['total'] = sum(p.numel() for p in self.parameters())
        counts['trainable'] = sum(p.numel() for p in self.parameters() if p.requires_grad)
        return counts

    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        timestamp: float | torch.Tensor | None = None,
        images: torch.Tensor | None = None,
        audio: torch.Tensor | None = None,
    ) -> dict:
        """
        Full forward pass through the kernel.

        Args:
            input_ids: (batch, seq_len) token IDs — can be None for pure sensory input
            timestamp: Unix timestamp (or None for current time)
            images: (batch, C, H, W) pixel values [0, 1] — vision input
            audio: (batch, 1, n_mels, n_frames) mel spectrogram — audio input

        Returns:
            dict with:
                logits: (batch, seq_len, vocab_size) — modulated output logits
                silence: (batch, 1) — silence gate value
                pixel_output: (batch, 3) — RGB output
                mode: CognitiveMode — current cognitive mode
                importance: (batch, seq_len, 1) — memory importance scores
                modality_mask: (batch, seq_len) or None
                cognitive_state: cognitive state vector
                mode_probs: mode probability distribution
                balance_loss: router load-balancing loss (for training)
                router_indices: (batch, top_k) — which specialists were selected
                router_weights: (batch, top_k) — routing weights

        Raises:
            ValueError: if input_ids, images, and audio are all None.
        """
        # --- 1. Embed text (if any) ---
        text_embeds = None
        if input_ids is not None:
            B, S = input_ids.shape
            text_embeds = self.embeddings.embed(input_ids)

        # Fuse with sensory streams when available; otherwise text only.
        modality_mask = None
        if self.has_senses and (images is not None or audio is not None):
            x, modality_mask = self.senses(
                text_embeds=text_embeds,
                images=images,
                audio=audio,
            )
            B = x.shape[0]
            S = x.shape[1]
        elif text_embeds is not None:
            x = text_embeds
            B, S = x.shape[0], x.shape[1]
        else:
            raise ValueError("Must provide input_ids, images, or audio")

        # --- 2. Memory read ---
        x = self.memory.read(x)

        # --- 3. Time encoding (added as a broadcast residual per batch) ---
        time_embed = None
        if timestamp is not None:
            if isinstance(timestamp, (int, float)):
                # FIX: build the tensor on the model's device — the original
                # created it on CPU and broke GPU execution.
                timestamp = torch.tensor(
                    [timestamp] * B, dtype=torch.float32, device=x.device
                )
            time_embed = self.time_engine(timestamp)
            x = x + time_embed.unsqueeze(1)

        # --- 4. Transformer layers under a causal mask ---
        if S <= self.config.max_seq_len:
            mask = self.causal_mask[:, :, :S, :S]
        else:
            # Sequence longer than the precomputed buffer: build on the fly.
            mask = torch.tril(torch.ones(1, 1, S, S, device=x.device))
        for layer in self.layers:
            x = layer(x, mask=mask)

        # --- 5. Final norm ---
        x = self.final_norm(x)

        # --- 5.5 Router + specialists ---
        # Mean-pool over sequence to get one routing query per batch element.
        pooled = x.mean(dim=1)

        router_indices, router_weights = self.router(pooled)

        specialist_out = self.specialist_registry(x, router_indices, router_weights, mask=mask)

        # Weighted residual keeps the trunk's signal dominant early on.
        x = x + self.specialist_scale * specialist_out

        # Load-balancing loss for training (encourages even specialist use).
        balance_loss = self.router.balance_loss(pooled)

        # --- 6. Tool check (residual add only when a tool activates) ---
        tool_result = self.tool_cortex.check_activation(x)
        if tool_result is not None:
            x = x + tool_result

        # --- 7. Modulate into output heads ---
        logits, silence, pixel_output = self.modulator(x, self.embeddings.output_head, temporal_embedding=time_embed)

        # --- 8. Memory write ---
        importance = self.memory.write(x)

        # --- 9. Cognition heartbeat ---
        mode, cognitive_state, mode_probs = self.cognition.heartbeat(x)

        # Periodic memory consolidation, gated by the cognition loop.
        if self.cognition.should_consolidate():
            self.memory.consolidate()

        # Let the time engine track event cadence.
        self.time_engine.record_event()

        return {
            'logits': logits,
            'silence': silence,
            'pixel_output': pixel_output,
            'mode': mode,
            'importance': importance,
            'modality_mask': modality_mask,
            'cognitive_state': cognitive_state,
            'mode_probs': mode_probs,
            'balance_loss': balance_loss,
            'router_indices': router_indices,
            'router_weights': router_weights,
        }

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.Tensor,
        max_tokens: int = 100,
        temperature: float = 1.0,
        top_k: int = 50,
        timestamp: float | None = None,
    ) -> torch.Tensor:
        """
        Autoregressive generation with temperature + top-k sampling.

        Args:
            input_ids: (batch, seq_len) prompt token IDs.
                NOTE(review): the `.item()` calls below assume batch == 1 —
                confirm callers never pass a larger batch.
            max_tokens: maximum number of new tokens to sample.
            temperature: softmax temperature (>0).
            top_k: keep only the k most likely tokens (0 disables filtering).
            timestamp: optional Unix timestamp forwarded to the kernel.

        Returns:
            (batch, seq_len + generated) tensor of token IDs.
        """
        self.eval()
        generated = input_ids.clone()

        for _ in range(max_tokens):
            # Truncate context to the model's maximum sequence length.
            context = generated[:, -self.config.max_seq_len:]

            result = self.forward(context, timestamp=timestamp)
            logits = result['logits']
            silence = result['silence']

            # Silence gate: the model chooses to stop talking.
            if silence.item() > self.config.silence_threshold:
                break

            next_logits = logits[:, -1, :] / temperature

            if top_k > 0:
                # FIX: clamp k to vocab size — topk raises if k exceeds it.
                k = min(top_k, next_logits.size(-1))
                v, _ = next_logits.topk(k)
                next_logits[next_logits < v[:, [-1]]] = float('-inf')

            probs = F.softmax(next_logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            generated = torch.cat([generated, next_token], dim=1)

            # Stop on end-of-sequence token.
            if next_token.item() == self.config.eos_token_id:
                break

        return generated
|
|