""" GLADIUS v2.0 — Updated Kernel with Router + Specialist Wiring Changes from original: - SpecialistRegistry imported and instantiated - Router called in forward() between final_norm and tool_cortex - Specialist outputs added as weighted residual - Balance loss returned for training - specialist_residual_scale config for stability """ import torch import torch.nn as nn import torch.nn.functional as F import math from typing import Optional from .config import KernelConfig from .embeddings import SharedEmbeddings from .attention import TransformerLayer, RMSNorm from .memory import ThreeTemperatureMemory from .temporal import TimeEngine from .modulator import Modulator from .cognition import CognitionLoop from .tools import ToolCortex from .router import NexusRouter # Import specialists import sys, os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) from specialists.specialists import SpecialistRegistry class GladiusKernel(nn.Module): """ GLADIUS Intelligence Kernel v2.1 — Router + Specialists Wired Forward pass: 1. Embed → 2. Memory read → 3. Time encoding → 4. Transformer layers → 5. Final norm → 5.5 ROUTER + SPECIALISTS → 6. Tool check → 7. Modulate → 8. Memory write → 9. Cognition heartbeat """ def __init__(self, config: KernelConfig): super().__init__() self.config = config # Core components self.embeddings = SharedEmbeddings(config) self.memory = ThreeTemperatureMemory(config) self.time_engine = TimeEngine(config) self.modulator = Modulator(config) self.cognition = CognitionLoop(config) self.tool_cortex = ToolCortex(config) self.router = NexusRouter(config) # === NEW: Specialist Registry === self.specialist_registry = SpecialistRegistry(config) # Specialist residual scaling (start small for stability) self.specialist_scale = getattr(config, 'specialist_residual_scale', 0.1) # Transformer layers self.layers = nn.ModuleList([ TransformerLayer(config, layer_idx=i) for i in range(config.num_layers) ]) self.final_norm = RMSNorm(config.hidden_dim) # Causal mask (precomputed for efficiency) self.register_buffer( 'causal_mask', torch.tril(torch.ones(1, 1, config.max_seq_len, config.max_seq_len)), ) # Senses (multimodal — optional) self.has_senses = False try: from .senses import SensoryIntegration self.senses = SensoryIntegration(config) self.has_senses = True except Exception: pass self._init_weights() def _init_weights(self): """Initialize weights with scaled normal distribution.""" for name, p in self.named_parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def count_parameters(self) -> dict: """Count parameters by component.""" counts = {} for name, module in self.named_children(): n = sum(p.numel() for p in module.parameters()) counts[name] = n counts['total'] = sum(p.numel() for p in self.parameters()) counts['trainable'] = sum(p.numel() for p in self.parameters() if p.requires_grad) return counts def forward( self, input_ids: torch.Tensor | None = None, timestamp: float | torch.Tensor | None = None, images: torch.Tensor | None = None, audio: torch.Tensor | None = None, ) -> dict: """ Full forward pass through the kernel. 
        Args:
            input_ids: (batch, seq_len) token IDs — can be None for pure sensory input
            timestamp: Unix timestamp (or None for current time)
            images: (batch, C, H, W) pixel values [0, 1] — vision input
            audio: (batch, 1, n_mels, n_frames) mel spectrogram — audio input

        Returns:
            dict with:
                logits: (batch, seq_len, vocab_size) — modulated output logits
                silence: (batch, 1) — silence gate value
                pixel_output: (batch, 3) — RGB output
                mode: CognitiveMode — current cognitive mode
                importance: (batch, seq_len, 1) — memory importance scores
                modality_mask: (batch, seq_len) or None
                cognitive_state: cognitive state vector
                mode_probs: mode probability distribution
                balance_loss: router load-balancing loss (for training)
                router_indices: (batch, top_k) — which specialists were selected
                router_weights: (batch, top_k) — routing weights
        """
        # 1. Embed text tokens (if provided)
        text_embeds = None
        if input_ids is not None:
            B, S = input_ids.shape
            text_embeds = self.embeddings.embed(input_ids)  # (B, S, D)

        # 1.5 Sensory integration (optional multimodal path)
        modality_mask = None
        if self.has_senses and (images is not None or audio is not None):
            x, modality_mask = self.senses(
                text_embeds=text_embeds,
                images=images,
                audio=audio,
            )
            B = x.shape[0]
            S = x.shape[1]
        elif text_embeds is not None:
            x = text_embeds
            B, S = x.shape[0], x.shape[1]
        else:
            raise ValueError("Must provide input_ids, images, or audio")

        # 2. Memory read (hot memory context + warm adapter)
        x = self.memory.read(x)

        # 3. Temporal encoding (additive input + stored for output gating)
        time_embed = None
        if timestamp is not None:
            if isinstance(timestamp, (int, float)):
                timestamp = torch.tensor([timestamp] * B, dtype=torch.float32, device=x.device)
            time_embed = self.time_engine(timestamp)  # (B, D)
            x = x + time_embed.unsqueeze(1)  # Broadcast across seq_len

        # 4. Transformer layers with causal mask
        if S <= self.config.max_seq_len:
            mask = self.causal_mask[:, :, :S, :S]
        else:
            mask = torch.tril(torch.ones(1, 1, S, S, device=x.device))
        for layer in self.layers:
            x = layer(x, mask=mask)

        # 5. Final norm
        x = self.final_norm(x)

        # === 5.5 ROUTER + SPECIALIST DISPATCH (NEW) ===
        # Pool hidden states for routing decision
        pooled = x.mean(dim=1)  # (B, D)

        # Router decides which specialists to activate
        router_indices, router_weights = self.router(pooled)  # (B, top_k), (B, top_k)

        # Dispatch to specialists — weighted sum of specialist outputs
        specialist_out = self.specialist_registry(x, router_indices, router_weights, mask=mask)

        # Add specialist contribution as scaled residual
        x = x + self.specialist_scale * specialist_out

        # Compute balance loss for training
        balance_loss = self.router.balance_loss(pooled)

        # 6. Tool check
        tool_result = self.tool_cortex.check_activation(x)
        if tool_result is not None:
            x = x + tool_result

        # 7. Modulate and produce logits (time gates the output)
        logits, silence, pixel_output = self.modulator(
            x, self.embeddings.output_head, temporal_embedding=time_embed
        )

        # 8. Memory write
        importance = self.memory.write(x)

        # 9. Cognition heartbeat
        mode, cognitive_state, mode_probs = self.cognition.heartbeat(x)
        # 10. Consolidation check
        if self.cognition.should_consolidate():
            self.memory.consolidate()

        # Record event in time engine
        self.time_engine.record_event()

        return {
            'logits': logits,
            'silence': silence,
            'pixel_output': pixel_output,
            'mode': mode,
            'importance': importance,
            'modality_mask': modality_mask,
            'cognitive_state': cognitive_state,
            'mode_probs': mode_probs,
            'balance_loss': balance_loss,
            'router_indices': router_indices,
            'router_weights': router_weights,
        }

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.Tensor,
        max_tokens: int = 100,
        temperature: float = 1.0,
        top_k: int = 50,
        timestamp: float | None = None,
    ) -> torch.Tensor:
        """
        Autoregressive generation (batch size 1 assumed: the silence and EOS checks
        call .item() on per-batch tensors).
        """
        self.eval()
        generated = input_ids.clone()

        for _ in range(max_tokens):
            # Truncate to max_seq_len
            context = generated[:, -self.config.max_seq_len:]

            result = self.forward(context, timestamp=timestamp)
            logits = result['logits']
            silence = result['silence']

            # Check silence gate
            if silence.item() > self.config.silence_threshold:
                break

            # Sample next token
            next_logits = logits[:, -1, :] / temperature
            if top_k > 0:
                v, _ = next_logits.topk(top_k)
                next_logits[next_logits < v[:, [-1]]] = float('-inf')
            probs = F.softmax(next_logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            generated = torch.cat([generated, next_token], dim=1)

            # EOS check
            if next_token.item() == self.config.eos_token_id:
                break

        return generated
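
# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (illustrative only; not part of the kernel API).
# It shows how the router's balance_loss returned by forward() is meant to be
# folded into a training objective alongside next-token cross-entropy.
# Assumptions: KernelConfig constructs with usable defaults and exposes
# vocab_size; the 0.01 balance-loss weight is illustrative, not a tuned value.
# Adjust both to the real config before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = KernelConfig()          # assumption: default construction is valid
    kernel = GladiusKernel(config)

    # Toy batch of token IDs (batch=2, seq_len=16)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    out = kernel(input_ids=input_ids, timestamp=0.0)

    # Next-token prediction loss: compare shifted logits against shifted targets
    logits = out['logits'][:, :-1, :]
    targets = input_ids[:, 1:]
    ce_loss = F.cross_entropy(
        logits.reshape(-1, logits.size(-1)), targets.reshape(-1)
    )

    # Add the router load-balancing term so specialists are used evenly
    total_loss = ce_loss + 0.01 * out['balance_loss']

    print(kernel.count_parameters())
    print('total loss:', float(total_loss))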