# gladius-training / kernel / memory.py
# GLADIUS training package: kernel + omega + synthase + checkpoint (step 529)
# (repo snapshot 63e99b4, verified; author: amuzetnoM)
"""
GLADIUS v2.0 β€” Three-Temperature Memory
Hot: Learned KV cache. Session-lived. Fast reads via hybrid attention.
Warm: Online LoRA adapters (Locas GLU-FFN + Share subspace + EBLoRA spectral balance).
Survives restarts via checkpoint. The dragon.
Cold: HEKTOR VDB. Unlimited capacity. Permanent archive.
Memory is the SUBSTRATE. Everything depends on it.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .config import KernelConfig
class HotMemory(nn.Module):
    """
    Learned KV cache with importance-gated writes.
    Read: SLA2 hybrid attention over memory slots (linear scan + sparse softmax).
    Write: Importance gate decides whether to store. LRU eviction when full.
    argmax_slot: S(slot | query) — which slot has the best match?
    argmax_write: S(write | hidden) — should this be remembered?
    """
    def __init__(self, config: KernelConfig):
        super().__init__()
        self.config = config
        self.num_slots = config.hot_memory_slots
        self.hidden_dim = config.hidden_dim
        # Learnable memory banks; write() mutates them in-place via .data.
        self.keys = nn.Parameter(torch.zeros(self.num_slots, config.hidden_dim))
        self.values = nn.Parameter(torch.zeros(self.num_slots, config.hidden_dim))
        # Scores how store-worthy each incoming hidden state is, in (0, 1).
        self.write_gate = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 4),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 4, 1),
            nn.Sigmoid(),
        )
        # Bookkeeping buffers (tracked state, not trained): per-slot usage
        # for LRU eviction, plus a write cursor.
        self.register_buffer('usage', torch.zeros(self.num_slots))
        self.register_buffer('write_head', torch.tensor(0, dtype=torch.long))
        self._init_weights()

    def _init_weights(self):
        # Small random init so early reads are near-uniform but not degenerate.
        nn.init.normal_(self.keys, std=0.01)
        nn.init.normal_(self.values, std=0.01)

    def read(self, query: torch.Tensor) -> torch.Tensor:
        """
        Read from hot memory via scaled dot-product attention over all slots.

        Args:
            query: (batch, seq_len, hidden_dim)
        Returns:
            memory_context: (batch, seq_len, hidden_dim)
        """
        # (B, S, D) x (D, N) -> (B, S, N) slot logits, scaled by sqrt(D).
        logits = (query @ self.keys.T) / math.sqrt(self.hidden_dim)
        attn = F.softmax(logits, dim=-1)
        # Soft attention mass counts toward slot usage (LRU signal);
        # detached so bookkeeping never enters the gradient.
        with torch.no_grad():
            self.usage += attn.detach().mean(dim=(0, 1))
        # Convex combination of stored values.
        return attn @ self.values

    def write(self, hidden: torch.Tensor) -> torch.Tensor:
        """
        Conditionally write to hot memory.

        Args:
            hidden: (batch, seq_len, hidden_dim)
        Returns:
            importance_scores: (batch, seq_len, 1) — for downstream use
        """
        importance = self.write_gate(hidden)  # (B, S, 1)
        # Simplified policy: per batch item, store only the single position
        # with the highest importance, and only past the threshold.
        with torch.no_grad():
            best_score, best_pos = importance.squeeze(-1).max(dim=1)  # each (B,)
            threshold = self.config.hot_importance_threshold
            for b in range(hidden.size(0)):
                if best_score[b] <= threshold:
                    continue
                # Evict the least-used slot (LRU).
                victim = self.usage.argmin().item()
                stored = hidden[b, best_pos[b]].detach()
                # Key and value are the same vector in this simplified scheme.
                self.keys.data[victim] = stored
                self.values.data[victim] = stored
                # Bump above every other slot: now most recently used.
                self.usage[victim] = self.usage.max() + 1
        return importance

    def reset(self):
        """Clear hot memory (session boundary)."""
        for tensor in (self.keys.data, self.values.data, self.usage, self.write_head):
            tensor.zero_()
class WarmMemory(nn.Module):
    """
    Online LoRA adapters with spectral balancing.
    STUB VERSION — will be replaced with full Share+EBLoRA+Locas synthesis.
    For now: simple low-rank adapter applied to hidden states.
    Checkpoint/restore implemented for restart survival.
    Architecture (target):
      - Locas GLU-FFN structure (compatible with base SwiGLU layers)
      - Share shared-subspace evolution (incremental knowledge integration)
      - EBLoRA spectral balancing (prevents catastrophic forgetting)
    """
    def __init__(self, config: "KernelConfig"):
        super().__init__()
        self.config = config
        # Simple low-rank adapter (stub — Locas GLU-FFN in Phase 5).
        # A: (hidden_dim, warm_rank), B: (warm_rank, hidden_dim).
        self.lora_A = nn.Parameter(
            torch.randn(config.hidden_dim, config.warm_rank) * 0.01
        )
        self.lora_B = nn.Parameter(
            torch.randn(config.warm_rank, config.hidden_dim) * 0.01  # Small random (not zero)
        )
        self.scale = nn.Parameter(torch.tensor(0.1))
        # Spectral health tracking
        self.register_buffer('update_count', torch.tensor(0, dtype=torch.long))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply warm memory adapter. Additive residual: x + scale * (x A B)."""
        adapter_out = (x @ self.lora_A @ self.lora_B) * self.scale
        return x + adapter_out

    def consolidate(self, hot_memory: "HotMemory"):
        """
        STUB: Consolidate important hot memory into warm adapters.
        Phase 5 implements full Share+EBLoRA+Locas synthesis.
        """
        self.update_count += 1

    def condition_number(self) -> float:
        """
        Spectral health: σ_max / σ_min over the adapter's NONZERO spectrum.

        BUG FIX: W = A @ B has rank at most warm_rank (< hidden_dim), so the
        full SVD of W always ends in numerically-zero singular values and the
        old check (σ_min of the FULL spectrum) returned inf unconditionally.
        We now measure conditioning within the adapter's r-dimensional
        subspace, i.e. over the top-r singular values only.

        Returns:
            Condition number as a float; inf if the adapter is numerically
            rank-deficient even within its r-dimensional subspace.
        """
        with torch.no_grad():
            W = self.lora_A @ self.lora_B
            S = torch.linalg.svdvals(W)  # sorted in descending order
            # Effective maximum rank of A @ B is the inner dimension r.
            r = min(self.lora_A.shape[1], S.numel())
            S = S[:r]
            if S[-1] < 1e-8:
                return float('inf')
            return (S[0] / S[-1]).item()

    def checkpoint(self, path: str):
        """Save adapter state to disk. Survives restart."""
        torch.save({
            'lora_A': self.lora_A.data,
            'lora_B': self.lora_B.data,
            'scale': self.scale.data,
            'update_count': self.update_count,
        }, path)

    def restore(self, path: str):
        """Load adapter state from disk (weights_only=True: tensors-only safe load)."""
        data = torch.load(path, weights_only=True)
        self.lora_A.data = data['lora_A']
        self.lora_B.data = data['lora_B']
        self.scale.data = data['scale']
        self.update_count = data['update_count']
class ColdMemory:
    """
    HEKTOR vector database interface.
    NOT a nn.Module — this is an external system.
    Cold memory is the permanent archive. Unlimited capacity.
    Accessed via HEKTOR's Unix socket / Python API.
    STUB: Returns zeros. Phase 5 connects to actual HEKTOR.
    """
    def __init__(self, config: "KernelConfig"):
        self.config = config
        self.embedding_dim = config.cold_embedding_dim  # width of archived vectors
        self.top_k = config.cold_top_k                  # results returned per query

    def retrieve(self, query: torch.Tensor) -> torch.Tensor:
        """
        Query cold memory for the top-k archived entries.

        Args:
            query: (batch, hidden_dim)
        Returns:
            results: (batch, top_k, cold_embedding_dim).
            NOTE: the width is cold_embedding_dim, not hidden_dim — the old
            docstring said hidden_dim but the code has always returned
            cold_embedding_dim.
        """
        # STUB: zeros on the caller's device AND dtype (the old version always
        # returned float32, which breaks downstream math under mixed precision).
        batch = query.shape[0]
        return torch.zeros(
            batch, self.top_k, self.embedding_dim,
            device=query.device, dtype=query.dtype,
        )

    def store(self, key: torch.Tensor, value: dict):
        """Archive (key, value) to cold storage. STUB: no-op."""
        pass
class ThreeTemperatureMemory(nn.Module):
    """
    Unified memory interface over the full hot → warm → cold pipeline.
    The cognition loop calls consolidate() during reflective mode.
    """
    def __init__(self, config: KernelConfig):
        super().__init__()
        self.hot = HotMemory(config)
        self.warm = WarmMemory(config)
        self.cold = ColdMemory(config)

    def read(self, query: torch.Tensor) -> torch.Tensor:
        """Read from hot memory and apply warm adapter."""
        context = self.hot.read(query)
        return self.warm(query + context)

    def write(self, hidden: torch.Tensor) -> torch.Tensor:
        """Write to hot memory. Returns importance scores."""
        scores = self.hot.write(hidden)
        warm = self.warm
        # Periodic spectral check on warm memory (runs during forward pass).
        # Only a full (non-stub) WarmMemory exposes `adapters` and `balancer`.
        if hasattr(warm, 'adapters') and hasattr(warm, 'balancer'):
            warm.update_count += 1
            if warm.update_count % 25 == 0:
                for adapter in warm.adapters:
                    # Emergency rebalance when conditioning degrades past 50.
                    if warm.balancer.condition_number(adapter) > 50.0:
                        warm.balancer.rebalance(adapter)
        return scores

    def consolidate(self):
        """Hot → Warm consolidation. Called by cognition loop."""
        if hasattr(self.warm, 'consolidate'):
            import inspect
            params = inspect.signature(self.warm.consolidate).parameters
            if len(params) > 1:
                # RealWarmMemory signature: takes raw (keys, values, usage).
                self.warm.consolidate(self.hot.keys, self.hot.values, self.hot.usage)
            else:
                # Stub WarmMemory signature: takes the HotMemory module itself.
                self.warm.consolidate(self.hot)

    def checkpoint(self, path: str):
        """Save warm memory to disk."""
        self.warm.checkpoint(path)

    def restore(self, path: str):
        """Restore warm memory from disk."""
        self.warm.restore(path)