| """ |
| GLADIUS v2.0 β MoDA: Multi-Head Depth Attention |
| |
| The insight: Standard transformers compute Q, K, V from the CURRENT layer's hidden |
| state only. But every previous layer already computed useful representations that get |
| discarded. MoDA adds a second set of K, V projections that attend over a "depth cache" |
| β the hidden states from ALL previous layers at each position. |
| |
| This is NOT cross-attention (fixed external memory). This is SELF-attention through |
| depth β the model attending to its own computation history. |
| |
| Architecture per layer l: |
| Sequence path (standard): |
| Q_seq = W_q @ x_l (query from current layer) |
| K_seq = W_k @ x_l (key from current layer) |
| V_seq = W_v @ x_l (value from current layer) |
| O_seq = softmax(Q_seq @ K_seq^T / sqrt(d)) @ V_seq |
| |
| Depth path (NEW): |
| K_depth = W_k_depth @ stack(x_0, x_1, ..., x_{l-1}) |
| V_depth = W_v_depth @ stack(x_0, x_1, ..., x_{l-1}) |
| O_depth = softmax(Q_seq @ K_depth^T / sqrt(d)) @ V_depth |
| |
| Combined: |
| O = gate * O_seq + (1 - gate) * O_depth |
| |
| The depth KV projections are TINY (hidden_dim β head_dim per group), and the depth |
| cache grows linearly with layers (not sequence length), so the cost is negligible. |
| |
| For GLADIUS Wyrm (640d, 14L, 20H): |
| Depth cache at layer 13: 13 Γ seq_len depth tokens per position |
| Extra params per layer: 2 Γ hidden_dim Γ (hidden_dim / num_groups) β 40K |
| Total extra: 14 Γ 40K β 560K params (0.5% of 104.9M) |
| |
| Reference: MoDA paper (Multi-Head Depth Attention) + Ali's SLA2 hybrid architecture. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import math |
|
|
| from .config import KernelConfig |
| from .attention import RoPE, RMSNorm, SwiGLU |
|
|
|
|
class DepthKVProjection(nn.Module):
    """
    Project depth-cache hidden states into K and V for depth attention.

    GQA-style: fewer KV heads than query heads keeps the depth path
    lightweight. For Wyrm: 20 Q heads over 4 KV groups -> 5 Q heads per group.
    """

    def __init__(self, hidden_dim: int, num_kv_heads: int, head_dim: int):
        super().__init__()
        self.num_kv_heads = num_kv_heads
        self.head_dim = head_dim
        self.kv_dim = num_kv_heads * head_dim

        self.k_proj = nn.Linear(hidden_dim, self.kv_dim, bias=False)
        self.v_proj = nn.Linear(hidden_dim, self.kv_dim, bias=False)

        self._init_weights()

    def _init_weights(self):
        # Very small init keeps the depth path near-silent early in training,
        # so it cannot destabilize an already-trained sequence path.
        for proj in (self.k_proj, self.v_proj):
            nn.init.normal_(proj.weight, std=0.005)

    def forward(self, depth_cache: torch.Tensor):
        """
        Args:
            depth_cache: (batch, depth_len, hidden_dim) stacked hidden states
                from previous layers.
        Returns:
            Tuple (K_depth, V_depth), each of shape
            (batch, num_kv_heads, depth_len, head_dim).
        """
        batch, depth_len, _ = depth_cache.shape
        head_shape = (batch, depth_len, self.num_kv_heads, self.head_dim)
        k_depth = self.k_proj(depth_cache).view(*head_shape).transpose(1, 2)
        v_depth = self.v_proj(depth_cache).view(*head_shape).transpose(1, 2)
        return k_depth, v_depth
|
|
|
|
class MoDAAttention(nn.Module):
    """
    Multi-Head Depth Attention — the core MoDA mechanism.

    Combines standard sequence attention (SLA2 hybrid: a per-head learned blend
    of softmax and linear attention) with depth attention over previous layers'
    hidden states.

    The depth path uses GQA with fewer KV heads for efficiency.
    A learned gate controls the blend between sequence and depth paths; the
    gate is biased toward 0 at init, so the layer starts as pure sequence
    attention.
    """

    def __init__(self, config: KernelConfig, layer_idx: int = 0,
                 num_depth_kv_heads: int = 4):
        """
        Args:
            config: model hyperparameters (num_heads, head_dim, hidden_dim,
                max_seq_len, optionally qk_softcap).
            layer_idx: index of this layer in the stack (stored, informational).
            num_depth_kv_heads: KV head count for the depth path (GQA);
                must evenly divide config.num_heads.
        """
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.num_heads = config.num_heads
        self.head_dim = config.head_dim
        self.hidden_dim = config.hidden_dim
        self.num_depth_kv_heads = num_depth_kv_heads

        # Each depth KV head serves q_per_kv query heads.
        assert config.num_heads % num_depth_kv_heads == 0, \
            f"num_heads ({config.num_heads}) must be divisible by num_depth_kv_heads ({num_depth_kv_heads})"
        self.q_per_kv = config.num_heads // num_depth_kv_heads

        # Standard full-rank sequence-attention projections.
        self.q_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False)
        self.o_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False)

        # Lightweight GQA projections for the depth cache.
        self.depth_kv = DepthKVProjection(config.hidden_dim, num_depth_kv_heads, config.head_dim)

        # Per-head, per-position gate deciding how much depth attention to mix
        # in. Bias is set to -2.0 in _init_weights (sigmoid ~0.12), so the
        # depth path starts nearly off.
        self.depth_gate = nn.Sequential(
            nn.Linear(config.hidden_dim, config.num_heads),
            nn.Sigmoid()
        )

        # Per-head, per-position router blending softmax vs. linear attention
        # in the sequence path (alpha=1 -> pure softmax).
        self.alpha_router = nn.Sequential(
            nn.Linear(config.hidden_dim, config.num_heads),
            nn.Sigmoid()
        )

        # Rotary position embedding for the sequence path only.
        self.rope = RoPE(config.head_dim, config.max_seq_len)

        # Optional tanh soft-capping of attention logits (Gemma-2 style);
        # disabled when the config does not define it.
        self.qk_softcap = getattr(config, 'qk_softcap', None)

        self._init_weights()

    def _init_weights(self):
        # Standard transformer init for the dense projections.
        for proj in [self.q_proj, self.k_proj, self.v_proj, self.o_proj]:
            nn.init.normal_(proj.weight, std=0.02)

        # depth_gate bias -2.0 -> sigmoid(−2) ≈ 0.12: depth path nearly closed
        # at init. alpha_router bias 0 -> sigmoid(0) = 0.5: even softmax/linear
        # blend at init.
        nn.init.constant_(self.depth_gate[0].bias, -2.0)
        nn.init.zeros_(self.alpha_router[0].bias)

    def _expand_kv_heads(self, kv: torch.Tensor) -> torch.Tensor:
        """
        Expand GQA KV heads to match Q heads by repeating each KV head
        q_per_kv times.
        (B, num_kv_heads, L, D) -> (B, num_heads, L, D)
        """
        B, H_kv, L, D = kv.shape
        # expand() is a view (no copy); reshape() then materializes the layout.
        return kv.unsqueeze(2).expand(B, H_kv, self.q_per_kv, L, D).reshape(B, self.num_heads, L, D)

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor | None = None,
        depth_cache: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Args:
            x: (batch, seq_len, hidden_dim) — current layer input
            mask: (batch, 1, seq_len, seq_len) — causal mask (0 = blocked);
                applied to the softmax branch only (see NOTE below)
            depth_cache: (batch, depth_len, hidden_dim) — stacked previous-layer
                states, or None for layer 0 (no depth history yet).
                NOTE(review): this docstring originally claimed
                depth_len = num_prev_layers * seq_len, but
                patch_kernel_forward_for_moda supplies one mean-pooled summary
                per previous layer (depth_len = num_prev_layers) — confirm the
                intended contract.
        Returns:
            (batch, seq_len, hidden_dim)
        """
        B, S, D = x.shape

        # Project and split into heads: (B, H, S, head_dim).
        Q = self.q_proj(x).view(B, S, self.num_heads, self.head_dim).transpose(1, 2)
        K = self.k_proj(x).view(B, S, self.num_heads, self.head_dim).transpose(1, 2)
        V = self.v_proj(x).view(B, S, self.num_heads, self.head_dim).transpose(1, 2)

        # Rotary position encoding for the sequence path.
        Q_rope = self.rope(Q, S)
        K_rope = self.rope(K, S)

        # --- Linear-attention branch (elu+1 feature map, Katharopoulos-style).
        # NOTE(review): KV_lin and Z_lin aggregate keys over ALL positions —
        # `mask` is never applied here, so this branch is NOT causal even when
        # the softmax branch is. Confirm this is intended for autoregressive
        # use.
        Q_lin = F.elu(Q_rope) + 1
        K_lin = F.elu(K_rope) + 1
        KV_lin = torch.matmul(K_lin.transpose(-2, -1), V)
        # (B, H, head_dim, 1): per-feature key mass, used as the normalizer.
        Z_lin = K_lin.transpose(-2, -1).sum(dim=-1, keepdim=True)
        O_linear = torch.matmul(Q_lin, KV_lin) / (torch.matmul(Q_lin, Z_lin) + 1e-6)

        # --- Softmax branch with optional logit soft-capping and causal mask.
        scores = torch.matmul(Q_rope, K_rope.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if self.qk_softcap is not None and self.qk_softcap > 0:
            scores = self.qk_softcap * torch.tanh(scores / self.qk_softcap)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attn_weights = F.softmax(scores, dim=-1)
        O_softmax = torch.matmul(attn_weights, V)

        # Blend the two sequence branches per head/position:
        # alpha: (B, S, H) -> (B, H, S, 1) to broadcast over head_dim.
        alpha = self.alpha_router(x).permute(0, 2, 1).unsqueeze(-1)
        O_seq = alpha * O_softmax + (1 - alpha) * O_linear

        # --- Depth path: attend over previous layers' cached states.
        if depth_cache is not None and depth_cache.shape[1] > 0:
            # GQA K/V over the depth cache, expanded to one per Q head.
            K_depth, V_depth = self.depth_kv(depth_cache)
            K_depth = self._expand_kv_heads(K_depth)
            V_depth = self._expand_kv_heads(V_depth)

            # Uses the pre-RoPE query Q: depth tokens index layers, not
            # sequence positions, so no rotary encoding is applied —
            # presumably intentional; confirm.
            # NOTE(review): no mask on depth scores — safe when the cache
            # holds per-layer summaries, but would leak future positions for a
            # per-position cache.
            depth_scores = torch.matmul(Q, K_depth.transpose(-2, -1)) / math.sqrt(self.head_dim)

            if self.qk_softcap is not None and self.qk_softcap > 0:
                depth_scores = self.qk_softcap * torch.tanh(depth_scores / self.qk_softcap)

            depth_attn = F.softmax(depth_scores, dim=-1)
            O_depth = torch.matmul(depth_attn, V_depth)

            # gate ~0.12 at init (bias -2.0) -> output starts as ~pure O_seq.
            gate = self.depth_gate(x).permute(0, 2, 1).unsqueeze(-1)
            O = (1 - gate) * O_seq + gate * O_depth
        else:
            # Layer 0 (or empty cache): sequence attention only.
            O = O_seq

        # Merge heads back to (B, S, hidden_dim) and project out.
        O = O.transpose(1, 2).contiguous().view(B, S, D)
        return self.o_proj(O)
|
|
|
|
class MoDATransformerLayer(nn.Module):
    """
    Pre-norm transformer layer built on MoDA attention.

    Drop-in replacement for TransformerLayer; forward() additionally accepts a
    depth_cache tensor that is threaded into the depth-attention path.
    """

    def __init__(self, config: KernelConfig, layer_idx: int = 0,
                 num_depth_kv_heads: int = 4):
        super().__init__()
        self.layer_idx = layer_idx
        self.attention = MoDAAttention(config, layer_idx, num_depth_kv_heads)
        self.ffn = SwiGLU(config)
        self.attn_norm = RMSNorm(config.hidden_dim)
        self.ffn_norm = RMSNorm(config.hidden_dim)

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor | None = None,
        depth_cache: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Args:
            x: (batch, seq_len, hidden_dim) layer input.
            mask: causal attention mask, or None.
            depth_cache: stacked previous-layer states
                (batch, depth_len, hidden_dim), or None at layer 0.
        Returns:
            (batch, seq_len, hidden_dim) layer output.
        """
        # Pre-norm residual attention sublayer.
        attn_out = self.attention(self.attn_norm(x), mask=mask, depth_cache=depth_cache)
        x = x + attn_out
        # Pre-norm residual feed-forward sublayer.
        ffn_out = self.ffn(self.ffn_norm(x))
        return x + ffn_out
|
|
|
|
def upgrade_kernel_to_moda(kernel, num_depth_kv_heads: int = 4,
                           init_from_sequence: bool = True):
    """
    Surgical upgrade: replace HybridAttention layers with MoDA layers.

    Preserves ALL existing weights (Q, K, V, O projections, FFN, norms).
    Only adds new depth_kv projections and depth_gate.

    Args:
        kernel: GladiusKernel instance with existing trained weights
        num_depth_kv_heads: number of KV heads for depth attention (GQA)
        init_from_sequence: if True, initialize depth KV from sequence KV
            weights (scaled by 0.1 so the depth path starts near-silent)

    Returns:
        Modified kernel with MoDA layers (in-place)
    """
    config = kernel.config
    device = next(kernel.parameters()).device
    dtype = next(kernel.parameters()).dtype

    new_layers = nn.ModuleList()

    for i, old_layer in enumerate(kernel.layers):
        # Fresh MoDA layer; built in default float32, cast at the end.
        moda_layer = MoDATransformerLayer(config, layer_idx=i,
                                          num_depth_kv_heads=num_depth_kv_heads)

        # --- Sequence attention projections, copied verbatim.
        # (copy_ handles cross-device/dtype copies.)
        moda_layer.attention.q_proj.weight.data.copy_(old_layer.attention.q_proj.weight.data)
        moda_layer.attention.k_proj.weight.data.copy_(old_layer.attention.k_proj.weight.data)
        moda_layer.attention.v_proj.weight.data.copy_(old_layer.attention.v_proj.weight.data)
        moda_layer.attention.o_proj.weight.data.copy_(old_layer.attention.o_proj.weight.data)

        # --- SLA2 softmax/linear blend router.
        moda_layer.attention.alpha_router[0].weight.data.copy_(old_layer.attention.alpha_router[0].weight.data)
        moda_layer.attention.alpha_router[0].bias.data.copy_(old_layer.attention.alpha_router[0].bias.data)

        # --- RoPE frequency and cos/sin caches.
        moda_layer.attention.rope.inv_freq.data.copy_(old_layer.attention.rope.inv_freq.data)
        moda_layer.attention.rope.cos_cached.data.copy_(old_layer.attention.rope.cos_cached.data)
        moda_layer.attention.rope.sin_cached.data.copy_(old_layer.attention.rope.sin_cached.data)

        # --- FFN (SwiGLU) weights.
        moda_layer.ffn.gate_proj.weight.data.copy_(old_layer.ffn.gate_proj.weight.data)
        moda_layer.ffn.up_proj.weight.data.copy_(old_layer.ffn.up_proj.weight.data)
        moda_layer.ffn.down_proj.weight.data.copy_(old_layer.ffn.down_proj.weight.data)

        # --- RMSNorm gains.
        moda_layer.attn_norm.weight.data.copy_(old_layer.attn_norm.weight.data)
        moda_layer.ffn_norm.weight.data.copy_(old_layer.ffn_norm.weight.data)

        if init_from_sequence:
            # Seed each depth KV group from the FIRST sequence head of the
            # corresponding Q-head group, scaled down 10x so the depth path
            # starts near-silent but in a familiar subspace.
            seq_k_weight = old_layer.attention.k_proj.weight.data
            seq_v_weight = old_layer.attention.v_proj.weight.data

            head_dim = config.head_dim
            q_per_kv = config.num_heads // num_depth_kv_heads
            kv_dim = num_depth_kv_heads * head_dim

            depth_k_weight = torch.zeros(kv_dim, config.hidden_dim, device=device, dtype=dtype)
            depth_v_weight = torch.zeros(kv_dim, config.hidden_dim, device=device, dtype=dtype)

            for g in range(num_depth_kv_heads):
                src_head = g * q_per_kv  # first Q head of group g
                src_start = src_head * head_dim
                src_end = src_start + head_dim
                dst_start = g * head_dim
                dst_end = dst_start + head_dim

                depth_k_weight[dst_start:dst_end] = seq_k_weight[src_start:src_end]
                depth_v_weight[dst_start:dst_end] = seq_v_weight[src_start:src_end]

            moda_layer.attention.depth_kv.k_proj.weight.data.copy_(depth_k_weight * 0.1)
            moda_layer.attention.depth_kv.v_proj.weight.data.copy_(depth_v_weight * 0.1)

        new_layers.append(moda_layer)

    # BUGFIX: cast to the kernel's dtype as well as its device. The freshly
    # constructed layers are float32; `.to(device)` alone left the new
    # depth_kv/depth_gate parameters (and all float32-storage copies)
    # mismatched with a bfloat16/half kernel.
    kernel.layers = new_layers.to(device=device, dtype=dtype)

    # Flags so downstream code (and checkpoints) know this kernel is MoDA.
    kernel._moda_enabled = True
    kernel._num_depth_kv_heads = num_depth_kv_heads

    # --- Report parameter and memory overhead.
    total = sum(p.numel() for p in kernel.parameters())
    trainable = sum(p.numel() for p in kernel.parameters() if p.requires_grad)
    depth_params = sum(
        sum(p.numel() for p in layer.attention.depth_kv.parameters()) +
        sum(p.numel() for p in layer.attention.depth_gate.parameters())
        for layer in kernel.layers
    )
    # Use the kernel's actual dtype instead of hardcoding 2 bytes / "bfloat16".
    bytes_per_param = torch.empty((), dtype=dtype).element_size()

    print(f"\n=== MoDA Upgrade Complete ===")
    print(f" Total params: {total:,} (+{depth_params:,} depth params)")
    print(f" Trainable: {trainable:,}")
    print(f" Depth overhead: {depth_params/total*100:.2f}%")
    print(f" Depth KV heads: {num_depth_kv_heads} (GQA ratio: {config.num_heads // num_depth_kv_heads}:1)")
    print(f" Memory: {total * bytes_per_param / 1024 / 1024:.1f} MB ({dtype})")

    return kernel
|
|
|
|
class MoDAKernelMixin:
    """
    Marker mixin for kernels whose forward() propagates a depth cache.

    Usage:
        kernel = GladiusKernel.load_checkpoint(path)
        kernel = upgrade_kernel_to_moda(kernel)
        patch_kernel_forward_for_moda(kernel)
    """
|
|
|
|
def patch_kernel_forward_for_moda(kernel):
    """
    Monkey-patch the kernel's forward to thread depth cache through layers.

    The original forward just does:
        for layer in self.layers:
            x = layer(x, mask=mask)

    MoDA needs:
        depth_states = []
        for layer in self.layers:
            x = layer(x, mask=mask, depth_cache=stack(depth_states))
            depth_states.append(summary(x))

    FIX: the unpatched forward was previously captured into a dead local; it
    is now stored on `kernel._moda_original_forward` so the patch can be
    reverted (`kernel.forward = kernel._moda_original_forward`).

    Args:
        kernel: GladiusKernel whose layers accept a `depth_cache` kwarg
            (see `upgrade_kernel_to_moda`).
    Returns:
        The same kernel, patched in place.
    """
    import types

    # Keep a handle to the unpatched forward so callers can restore it.
    kernel._moda_original_forward = kernel.forward

    def moda_forward(self, input_ids=None, timestamp=None, images=None, audio=None):
        """MoDA-patched forward: threads depth cache through transformer layers."""
        # --- Embed text and/or sensory inputs.
        text_embeds = None
        if input_ids is not None:
            B, S = input_ids.shape
            text_embeds = self.embeddings.embed(input_ids)

        modality_mask = None
        if self.has_senses and (images is not None or audio is not None):
            x, modality_mask = self.senses(text_embeds=text_embeds, images=images, audio=audio)
            B, S = x.shape[0], x.shape[1]
        elif text_embeds is not None:
            x = text_embeds
            B, S = x.shape[0], x.shape[1]
        else:
            raise ValueError("Must provide input_ids, images, or audio")

        # Inject retrieved memory into the stream.
        x = self.memory.read(x)

        # Optional temporal conditioning (scalar timestamps are broadcast).
        time_embed = None
        if timestamp is not None:
            if isinstance(timestamp, (int, float)):
                timestamp = torch.tensor([timestamp] * B, dtype=torch.float32, device=x.device)
            time_embed = self.time_engine(timestamp)
            x = x + time_embed.unsqueeze(1)

        # Causal mask: use the precomputed buffer when the sequence fits,
        # otherwise build one on the fly.
        if S <= self.config.max_seq_len:
            mask = self.causal_mask[:, :, :S, :S]
        else:
            mask = torch.tril(torch.ones(1, 1, S, S, device=x.device))

        # --- Transformer stack with depth-cache threading.
        depth_states = []
        for layer in self.layers:
            if len(depth_states) > 0:
                # (B, num_prev_layers, hidden_dim): one pooled summary per
                # previous layer.
                depth_cache = torch.stack(depth_states, dim=1)
            else:
                depth_cache = None  # layer 0 has no history

            x = layer(x, mask=mask, depth_cache=depth_cache)

            # Mean-pool over the sequence -> one summary vector per layer.
            # NOTE(review): .detach() stops gradients flowing into earlier
            # layers through the depth cache — presumably intentional for
            # stability; confirm.
            depth_states.append(x.mean(dim=1).detach())

        x = self.final_norm(x)

        # Tool cortex may inject an additive correction.
        tool_result = self.tool_cortex.check_activation(x)
        if tool_result is not None:
            x = x + tool_result

        # Output heads: token logits, silence signal, optional pixel output.
        logits, silence, pixel_output = self.modulator(x, self.embeddings.output_head, temporal_embedding=time_embed)

        # Side effects: memory write, cognition heartbeat, consolidation.
        importance = self.memory.write(x)

        mode, cognitive_state, mode_probs = self.cognition.heartbeat(x)

        if self.cognition.should_consolidate():
            self.memory.consolidate()

        self.time_engine.record_event()

        return {
            'logits': logits,
            'silence': silence,
            'pixel_output': pixel_output,
            'mode': mode,
            'importance': importance,
            'modality_mask': modality_mask,
            'cognitive_state': cognitive_state,
            'mode_probs': mode_probs,
        }

    # Bind as a method so `self` resolves to the kernel instance.
    kernel.forward = types.MethodType(moda_forward, kernel)
    print(" Forward pass patched for MoDA depth cache propagation")
    return kernel
|
|