# gladius-v2-kernel / kernel / cognition.py
# WYRM kernel source (v27 FINAL) — commit 9463e5c (verified), uploaded by amuzetnoM
"""
GLADIUS v2.0 — Cognitive Heartbeat
The system that makes GLADIUS think without being asked.
State Monitor → Heartbeat Scheduler → Attention Filter → Prompt Generator
↓ ↓ ↓ ↓
"what's happening" "what mode" "what matters" "what to think about"
Four modes: active (100ms), monitoring (1s), reflective (5s), dormant (30s)
The cognitive loop runs continuously, independent of external input.
STUB VERSION — implements the interface. Real logic in Phase 5.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from enum import IntEnum
from .config import KernelConfig
class CognitiveMode(IntEnum):
    """Heartbeat cadence for the cognition loop.

    Lower values mean higher activity and a faster tick; the module
    docstring maps each mode to its heartbeat interval.
    """
    ACTIVE = 0      # Processing input, fast heartbeat (100ms)
    MONITORING = 1  # Idle but alert, watching for triggers (1s)
    REFLECTIVE = 2  # Consolidating, self-prompting (5s)
    DORMANT = 3     # Deep idle, minimal activity (30s)
class StateMonitor(nn.Module):
    """Summarizes last-layer hidden activations into a cognitive state vector.

    Mean-pools the hidden states over the sequence axis, then maps the
    pooled vector through a small two-layer MLP.
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        # Two-layer projection: hidden_dim -> state_dim -> state_dim.
        # (Attribute name `proj` is part of the checkpoint layout.)
        self.proj = nn.Sequential(
            nn.Linear(config.hidden_dim, config.cognition_state_dim),
            nn.SiLU(),
            nn.Linear(config.cognition_state_dim, config.cognition_state_dim),
        )

    def forward(self, hidden: torch.Tensor) -> torch.Tensor:
        """Reduce activations to a per-batch state vector.

        Args:
            hidden: (batch, seq_len, hidden_dim) last-layer output.

        Returns:
            (batch, cognition_state_dim) cognitive state vector.
        """
        # Sequence-mean pooling, then the MLP projection.
        return self.proj(hidden.mean(dim=1))
class HeartbeatScheduler(nn.Module):
    """Maps the cognitive state vector to a heartbeat mode.

    Implements argmax_mode S(mode | cognitive_state, time, memory_pressure)
    as a single linear read-out over the state vector.
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        # (Attribute name `mode_classifier` is part of the checkpoint layout.)
        self.mode_classifier = nn.Linear(config.cognition_state_dim, config.cognition_modes)

    def forward(self, state: torch.Tensor) -> tuple[CognitiveMode, torch.Tensor]:
        """Classify the cognitive state into a mode.

        Returns:
            mode: CognitiveMode for the whole kernel — decided from batch
                element 0, since the scheduler is a kernel-level switch.
            mode_probs: (batch, num_modes) softmax distribution over modes.
        """
        logits = self.mode_classifier(state)
        probs = F.softmax(logits, dim=-1)
        # Softmax is monotonic, so argmax over probs equals argmax over logits.
        winner = int(torch.argmax(probs[0]))
        return CognitiveMode(winner), probs
class AttentionFilter(nn.Module):
    """Reticular Activating System (RAS) analogue.

    Scores an incoming signal for relevance against the current cognitive
    state: argmax_signal S(process | signal, context) vs S(discard | signal, context).
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        # Small MLP gate: [signal, state] -> scalar in (0, 1).
        # (Attribute name `relevance` is part of the checkpoint layout.)
        self.relevance = nn.Sequential(
            nn.Linear(config.hidden_dim + config.cognition_state_dim, config.hidden_dim // 2),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, signal: torch.Tensor, state: torch.Tensor) -> torch.Tensor:
        """Score a signal's relevance to the current state.

        Args:
            signal: (batch, hidden_dim) incoming information.
            state: (batch, cognition_state_dim) current cognitive state.

        Returns:
            (batch, 1) relevance score; 0 = irrelevant, 1 = critical.
        """
        return self.relevance(torch.cat((signal, state), dim=-1))
class CognitionLoop(nn.Module):
    """
    Complete cognitive heartbeat system.
    STUB: Always returns ACTIVE mode and passes through.
    Phase 5 implements the full self-prompting loop.
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        self.state_monitor = StateMonitor(config)
        self.scheduler = HeartbeatScheduler(config)
        self.attention_filter = AttentionFilter(config)
        self.current_mode = CognitiveMode.ACTIVE
        # Caches for auxiliary-loss computation, populated by heartbeat().
        # Initialized here (rather than lazily in heartbeat) so the
        # attributes always exist and get_last_state() needs no getattr
        # fallback.
        self._last_state = None
        self._last_mode_probs = None

    def heartbeat(self, hidden: torch.Tensor) -> tuple[CognitiveMode, torch.Tensor, torch.Tensor]:
        """
        One heartbeat tick. Observes state, decides mode.

        Args:
            hidden: (batch, seq_len, hidden_dim) last-layer activations.

        Returns:
            tuple of:
                mode: current cognitive mode
                state: (batch, cognition_state_dim) cognitive state vector
                probs: (batch, num_modes) mode probability distribution
        """
        state = self.state_monitor(hidden)
        mode, probs = self.scheduler(state)
        self.current_mode = mode
        # Cache state and probs for auxiliary loss computation.
        self._last_state = state
        self._last_mode_probs = probs
        return mode, state, probs

    def get_last_state(self):
        """Return (state, mode_probs) cached by the most recent heartbeat,
        or (None, None) if no heartbeat has run yet."""
        return self._last_state, self._last_mode_probs

    def should_self_prompt(self) -> bool:
        """Whether the cognition loop should generate a self-prompt."""
        return self.current_mode == CognitiveMode.REFLECTIVE

    def should_consolidate(self) -> bool:
        """Whether memory consolidation should run."""
        return self.current_mode in (CognitiveMode.REFLECTIVE, CognitiveMode.DORMANT)