| """ |
| GLADIUS v2.0 β Warm Memory: Share + EBLoRA + Locas Synthesis |
| |
| The dragon. Three papers forged into one mechanism: |
| - Locas (2602.05085): GLU-FFN structure, principled initialization, merge capability |
| - Share (2602.06043): Evolving shared subspace, incremental integration |
| - EBLoRA (2602.00722): Spectral balancing, Stiefel manifold constraint |
| |
| This replaces the stub WarmMemory in memory.py. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import math |
|
|
|
|
class LocasAdapter(nn.Module):
    """
    Locas-style GLU-FFN adapter.

    Same structure as the base model's SwiGLU layers, but low-rank.
    Can be merged INTO base model weights (permanentize).
    Initialized from base model parameters for fast convergence.
    """

    def __init__(self, hidden_dim: int, rank: int):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.rank = rank

        # Low-rank GLU: gate/up project down to `rank`, down projects back up.
        self.gate_proj = nn.Linear(hidden_dim, rank, bias=False)
        self.up_proj = nn.Linear(hidden_dim, rank, bias=False)
        self.down_proj = nn.Linear(rank, hidden_dim, bias=False)

        # Learnable output gain; starts small so the adapter is near-silent.
        self.scale = nn.Parameter(torch.tensor(0.01))

        self._init_weights()

    def _init_weights(self):
        # down_proj starts at zero so the freshly-built adapter is an exact no-op.
        nn.init.normal_(self.gate_proj.weight, std=0.01)
        nn.init.normal_(self.up_proj.weight, std=0.01)
        nn.init.zeros_(self.down_proj.weight)

    def init_from_base(self, base_gate: nn.Linear, base_up: nn.Linear):
        """
        Locas principled initialization: copy the top-k right singular
        vectors of the base model's FFN projections into the input
        projections. Falls back to the random init if SVD fails.
        """
        with torch.no_grad():
            for proj, base in ((self.gate_proj, base_gate), (self.up_proj, base_up)):
                try:
                    # Rows of Vh are the right singular vectors, sorted by
                    # descending singular value.
                    _, _, Vh = torch.linalg.svd(base.weight, full_matrices=False)
                except torch.linalg.LinAlgError:  # public alias, not torch._C._LinAlgError
                    continue
                # Guard: rank may exceed the number of available singular
                # vectors (e.g. a wide-but-short base weight); only the
                # first k rows are principled, the rest keep random init.
                k = min(self.rank, Vh.shape[0])
                proj.weight.data[:k] = Vh[:k, :]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """GLU-FFN forward. Returns the residual to add to the hidden state."""
        gate = F.silu(self.gate_proj(x))
        up = self.up_proj(x)
        return self.down_proj(gate * up) * self.scale

    def get_weight_matrix(self) -> torch.Tensor:
        """
        Reconstruct an effective weight matrix for spectral analysis.
        The GLU path is nonlinear, so we linearize via the dominant
        path: down @ up.
        """
        return self.down_proj.weight @ self.up_proj.weight
|
|
|
class SpectralBalancer:
    """
    EBLoRA-inspired spectral balancing.

    Monitors the condition number of an adapter's effective weight and
    rebalances when singular values become too skewed (a skewed spectrum
    correlates with catastrophic forgetting).
    """

    def __init__(self, condition_threshold: float = 10.0):
        # Rebalance triggers when sigma_max / sigma_min exceeds this.
        self.condition_threshold = condition_threshold
        # Log of observed condition numbers (appended by needs_rebalance).
        self.history = []

    def condition_number(self, adapter: "LocasAdapter") -> float:
        """Compute sigma_max / sigma_min of the adapter's effective weight."""
        with torch.no_grad():
            W = adapter.get_weight_matrix()
            try:
                S = torch.linalg.svdvals(W)  # returned in descending order
            except torch.linalg.LinAlgError:  # public alias, not torch._C._LinAlgError
                return 1.0
            S_nonzero = S[S > 1e-8]
            if len(S_nonzero) < 2:
                # Rank <= 1: condition number is degenerate; report neutral.
                return 1.0
            return (S_nonzero[0] / S_nonzero[-1]).item()

    def needs_rebalance(self, adapter: "LocasAdapter") -> bool:
        """Record the current condition number and test it against the threshold."""
        cn = self.condition_number(adapter)
        self.history.append(cn)
        return cn > self.condition_threshold

    def rebalance(self, adapter: "LocasAdapter"):
        """
        Force spectral balance by compressing singular values toward their mean.

        Decouples magnitude from direction (EBLoRA core principle):
        1. SVD the effective weight (down @ up)
        2. Soft-clamp log-singular-values halfway toward their log-mean
        3. Redistribute sqrt(S) symmetrically into the down/up factors
        """
        with torch.no_grad():
            W = adapter.down_proj.weight @ adapter.up_proj.weight

            # Tiny diagonal jitter improves SVD convergence on degenerate W.
            eps = 1e-6 * torch.eye(W.shape[0], W.shape[1], device=W.device, dtype=W.dtype)
            W = W + eps

            try:
                U, S, Vh = torch.linalg.svd(W, full_matrices=False)
            except torch.linalg.LinAlgError:  # public alias, not torch._C._LinAlgError
                # SVD failed even with jitter; skip this rebalance pass.
                return

            mask = S > 1e-6
            if mask.sum() < 2:
                return  # nothing meaningful to balance

            S_active = S[mask]

            # Geometric soft-clamp: halve each log-deviation from the log-mean,
            # i.e. pull every active sigma halfway toward the geometric mean.
            log_S = torch.log(S_active)
            log_mean = log_S.mean()
            balanced_log_S = log_mean + 0.5 * (log_S - log_mean)
            S_balanced = S.clone()
            S_balanced[mask] = torch.exp(balanced_log_S)

            # Split sqrt(S) symmetrically between the two low-rank factors.
            rank = adapter.rank
            k = min(rank, S_balanced.shape[0], U.shape[1], Vh.shape[0])
            sqrt_S = torch.sqrt(S_balanced[:k].clamp(min=1e-8))

            new_down_full = U[:, :k] @ torch.diag(sqrt_S)
            new_up_full = torch.diag(sqrt_S) @ Vh[:k, :]

            # Write back only the slice both shapes support.
            r = min(rank, k)
            adapter.down_proj.weight.data[:, :r] = new_down_full[:, :r]
            adapter.up_proj.weight.data[:r, :] = new_up_full[:r, :]
|
|
|
class SubspaceTracker:
    """
    Share-inspired evolving subspace tracker.

    Maintains a compact representation of what the warm memory "knows."
    New knowledge is checked against this subspace:
    - If it projects well -> already known -> small update
    - If large residual -> novel knowledge -> evolve subspace
    """

    def __init__(self, hidden_dim: int, rank: int, novelty_threshold: float = 0.1):
        self.hidden_dim = hidden_dim
        self.rank = rank
        # Residual norms above this are treated as novel knowledge.
        self.novelty_threshold = novelty_threshold

        # Row-major basis of tracked directions (rank x hidden_dim).
        self.basis = torch.zeros(rank, hidden_dim)
        # Per-direction importance; decays over time, lowest is evicted first.
        self.importance = torch.zeros(rank)
        self.initialized = False

    def initialize_from_adapter(self, adapter: "LocasAdapter"):
        """Seed the subspace from the adapter's current spectral decomposition."""
        with torch.no_grad():
            W = adapter.get_weight_matrix()
            try:
                U, S, Vh = torch.linalg.svd(W, full_matrices=False)
            except torch.linalg.LinAlgError:
                # Consistent with the file's other SVD call sites: skip on
                # failure and stay uninitialized (novelty will report inf).
                return
            k = min(self.rank, len(S))
            # .to() guards against an adapter living on a different device
            # than these CPU-resident buffers.
            self.basis[:k] = Vh[:k].to(self.basis.device)
            self.importance[:k] = S[:k].to(self.importance.device)
            self.initialized = True

    def compute_novelty(self, gradient: torch.Tensor) -> tuple[float, torch.Tensor]:
        """
        Measure how much of `gradient` is NOT captured by the current subspace.

        Returns (residual_norm, residual). NOTE: before initialization the
        second element is the raw gradient; afterwards it is a
        hidden_dim-length vector — callers must tolerate both shapes.
        """
        if not self.initialized:
            return float('inf'), gradient

        g_flat = gradient.flatten()
        basis_flat = self.basis.reshape(self.rank, -1).to(g_flat.device)

        g_dim = g_flat.shape[0]
        b_dim = basis_flat.shape[1]

        # Coerce the gradient to the basis width: truncate or zero-pad.
        if g_dim != b_dim:
            g_proj = g_flat[:b_dim] if g_dim > b_dim else F.pad(g_flat, (0, b_dim - g_dim))
        else:
            g_proj = g_flat

        # Project onto the basis rows (assumed approximately orthonormal —
        # rows of Vh are; evolved rows are unit-norm but may not be mutually
        # orthogonal) and measure what remains outside the span.
        projection = basis_flat @ g_proj
        reconstructed = projection @ basis_flat
        residual = g_proj - reconstructed
        novelty = residual.norm().item()

        return novelty, residual.reshape(self.hidden_dim)

    def evolve(self, new_direction: torch.Tensor, importance: float):
        """
        Integrate a new direction into the subspace by evicting the
        least-important existing direction, then decay all importances.
        """
        new_dir_flat = new_direction.flatten()
        new_dir_flat = new_dir_flat / (new_dir_flat.norm() + 1e-8)

        least_idx = self.importance.argmin().item()
        self.basis[least_idx] = new_dir_flat[:self.hidden_dim]
        self.importance[least_idx] = importance

        # Gentle forgetting: old directions slowly lose priority.
        self.importance *= 0.99
|
|
|
|
class RealWarmMemory(nn.Module):
    """
    Full warm memory implementation: Locas + Share + EBLoRA.

    Architecture: Locas GLU-FFN adapter (per transformer layer)
    Evolution: Share subspace tracking for novelty detection
    Stability: EBLoRA spectral balancing

    This is the dragon, tamed.
    """

    def __init__(self, config, num_layers: int | None = None):
        """
        Args:
            config: object exposing hidden_dim, warm_rank, num_layers,
                warm_condition_threshold, warm_novelty_threshold and
                warm_balance_frequency (presumably a KernelConfig — the
                exact type is not enforced here).
            num_layers: override for the number of per-layer adapters;
                falls back to config.num_layers when None (or 0).
        """
        super().__init__()
        self.config = config
        self.hidden_dim = config.hidden_dim
        self.rank = config.warm_rank
        num_layers = num_layers or config.num_layers

        # One low-rank GLU adapter per transformer layer.
        self.adapters = nn.ModuleList([
            LocasAdapter(config.hidden_dim, config.warm_rank)
            for _ in range(num_layers)
        ])

        # Shared spectral monitor (EBLoRA).
        self.balancer = SpectralBalancer(config.warm_condition_threshold)

        # Per-layer subspace trackers (Share). Plain list: they hold no
        # trainable parameters, so they stay out of the module state_dict
        # and are checkpointed separately in checkpoint()/restore().
        self.trackers = [
            SubspaceTracker(config.hidden_dim, config.warm_rank, config.warm_novelty_threshold)
            for _ in range(num_layers)
        ]

        # Buffer so the counter persists through state_dict round-trips.
        self.register_buffer('update_count', torch.tensor(0, dtype=torch.long))

    def forward(self, x: torch.Tensor, layer_idx: int = 0) -> torch.Tensor:
        """Apply the warm-memory adapter for one layer as a residual update."""
        if layer_idx < len(self.adapters):
            return x + self.adapters[layer_idx](x)
        # Out-of-range layer index: pass the hidden state through untouched.
        return x

    def forward_all(self, x: torch.Tensor) -> torch.Tensor:
        """Apply every adapter sequentially (for simple, single-stream use)."""
        for adapter in self.adapters:
            x = x + adapter(x)
        return x

    @torch.no_grad()
    def consolidate(self, hot_keys: torch.Tensor, hot_values: torch.Tensor,
                    importance_scores: torch.Tensor):
        """
        Real consolidation: hot memory -> warm adapters.

        1. Collapse hot values into a mean signal
        2. Check novelty against each layer's subspace
        3. Nudge adapters toward novel directions, with spectral balancing

        NOTE(review): hot_keys and importance_scores are currently unused;
        only hot_values feeds the update. Kept for interface stability.
        """
        self.update_count += 1

        # Aggregate the hot-memory payload; skip near-zero signals.
        signal = hot_values.mean(dim=0)
        if signal.norm() < 1e-6:
            return

        for i, (adapter, tracker) in enumerate(zip(self.adapters, self.trackers)):
            # Lazily seed the tracker from the adapter's current spectrum.
            if not tracker.initialized:
                tracker.initialize_from_adapter(adapter)

            # Rank-1 proxy for the gradient induced by this signal.
            W_grad = torch.outer(signal, signal)

            novelty, residual = tracker.compute_novelty(W_grad)

            if novelty > tracker.novelty_threshold:
                # Novel direction: fold it into the tracked subspace.
                tracker.evolve(residual, novelty)

                # Heavily damped nudges (lr * 0.01 / 0.1) keep consolidation
                # from destabilizing the adapter between balance passes.
                lr = 0.001
                r = min(adapter.rank, residual.shape[0])
                h = min(self.hidden_dim, adapter.up_proj.weight.shape[1])
                adapter.up_proj.weight.data[:r, :h] += lr * residual[:r].unsqueeze(1).expand(r, h) * 0.01

                # Outer-product update steers down_proj toward the signal.
                signal_norm = signal / (signal.norm() + 1e-8)
                r_down = min(adapter.rank, adapter.down_proj.weight.shape[1])
                h_down = min(self.hidden_dim, adapter.down_proj.weight.shape[0])
                down_update = signal_norm[:h_down].unsqueeze(1) * residual[:r_down].unsqueeze(0)
                adapter.down_proj.weight.data[:h_down, :r_down] += lr * down_update * 0.1

                # Ease the output gain toward its working value.
                target_scale = 0.3
                adapter.scale.data += lr * 0.1 * (target_scale - adapter.scale.data)

            # Periodic spectral stability pass (EBLoRA).
            if self.update_count % max(1, self.config.warm_balance_frequency // 10) == 0:
                cn = self.balancer.condition_number(adapter)
                if cn > self.config.warm_condition_threshold:
                    self.balancer.rebalance(adapter)

    def condition_number(self) -> float:
        """Average condition number across adapter layers (1.0 if there are none)."""
        if not self.adapters:
            # Avoid ZeroDivisionError for a zero-layer memory.
            return 1.0
        cns = [self.balancer.condition_number(a) for a in self.adapters]
        return sum(cns) / len(cns)

    def checkpoint(self, path: str):
        """Persist adapters, the update counter, subspaces and spectral history."""
        state = {
            'adapters': self.state_dict(),
            'update_count': self.update_count.item(),
            'subspace_states': [
                {'basis': t.basis.clone(), 'importance': t.importance.clone()}
                for t in self.trackers
            ],
            'spectral_history': self.balancer.history[-100:],
        }
        torch.save(state, path)

    def restore(self, path: str):
        """Load warm memory state written by checkpoint().

        SECURITY: weights_only=False unpickles arbitrary objects; only
        restore checkpoints from trusted sources.
        """
        state = torch.load(path, weights_only=False)
        self.load_state_dict(state['adapters'], strict=False)
        self.update_count.fill_(state['update_count'])
        for tracker, ss in zip(self.trackers, state.get('subspace_states', [])):
            tracker.basis = ss['basis']
            tracker.importance = ss['importance']
            tracker.initialized = True
        self.balancer.history = state.get('spectral_history', [])
|