| """ |
| GLADIUS β Synthase Transformer Layer |
| |
| Drop-in replacement for TransformerLayer that adds ATP synthase depth attention |
| as a separate residual stream (peripheral stalk principle). |
| |
| Architecture: |
| x_normed = RMSNorm(x) |
| O_backbone = backbone_attention(x_normed, mask) β F1 hexamer (stable) |
| O_depth = synthase_depth(Q, x_normed, depth_cache) β FO motor (depth) |
| x = x + O_backbone + depth_scale * O_depth β peripheral stalk |
| x = x + FFN(RMSNorm(x)) |
| |
| The backbone attention is UNCHANGED. The depth attention is purely additive. |
| The backbone can learn to ignore depth (O_depth β 0 via gate). |
| The depth can learn to contribute (gate β 1, depth_scale grows). |
| Neither is forced. Both have fair initial conditions. |
| |
| Authors: Ali A. Shakil, Ava Shakil |
| Date: March 27, 2026 |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import math |
| from typing import Optional |
|
|
| from .synthase_attention import SynthaseDepthAttention |
|
|
|
|
class SynthaseTransformerLayer(nn.Module):
    """
    Transformer layer augmented with ATP synthase-inspired depth attention.

    The backbone attention path is left untouched; the depth attention is a
    purely additive second residual stream, weighted by a learned scalar
    (``depth_scale``).  Compatible with the GLADIUS Omega kernel
    (HybridAttention + SwiGLU + RMSNorm).
    """

    def __init__(
        self,

        attention: nn.Module,
        ffn: nn.Module,
        attn_norm: nn.Module,
        ffn_norm: nn.Module,

        hidden_dim: int = 640,
        num_heads: int = 20,
        head_dim: int = 32,
        num_depth_kv_heads: int = 4,
        depth_k: int = 32,
        max_depth_layers: int = 14,
        layer_idx: int = 0,
        qk_softcap: Optional[float] = None,
        use_bottleneck: bool = False,
        bottleneck_dim: int = 128,
    ):
        """
        Args:
            attention: backbone self-attention module (must expose
                ``q_proj``, ``num_heads`` and ``head_dim``).
            ffn: feed-forward module applied after attention.
            attn_norm: pre-attention normalization module.
            ffn_norm: pre-FFN normalization module.
            hidden_dim / num_heads / head_dim / num_depth_kv_heads /
            depth_k / max_depth_layers / qk_softcap / use_bottleneck /
            bottleneck_dim: configuration forwarded to
                ``SynthaseDepthAttention``.
            layer_idx: position of this layer in the stack (diagnostics only).
        """
        super().__init__()
        self.layer_idx = layer_idx

        # Backbone sub-modules are injected rather than constructed here, so
        # the layer remains a drop-in replacement for TransformerLayer.
        self.attention = attention
        self.ffn = ffn
        self.attn_norm = attn_norm
        self.ffn_norm = ffn_norm

        # Depth ("FO motor") attention stream.
        self.depth_attention = SynthaseDepthAttention(
            hidden_dim=hidden_dim,
            num_heads=num_heads,
            head_dim=head_dim,
            num_depth_kv_heads=num_depth_kv_heads,
            depth_k=depth_k,
            max_depth_layers=max_depth_layers,
            qk_softcap=qk_softcap,
            use_bottleneck=use_bottleneck,
            bottleneck_dim=bottleneck_dim,
        )

        # Learned scalar weight on the depth residual.  Starts small (0.1) so
        # early training is dominated by the stable backbone stream.
        self.depth_scale = nn.Parameter(torch.tensor(0.1))

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        depth_cache: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Run backbone attention plus the optional additive depth stream.

        Args:
            x: (B, S, D) input hidden state.
            mask: causal attention mask for the backbone.
            depth_cache: (B, D_len, D) cache from DepthCacheBuilder; when
                None or empty, the depth stream is skipped entirely.

        Returns:
            (B, S, D) updated hidden state.
        """
        normed = self.attn_norm(x)
        backbone_out = self.attention(normed, mask=mask)

        use_depth = depth_cache is not None and depth_cache.shape[1] > 0
        if not use_depth:
            x = x + backbone_out
        else:
            batch, seq_len, _ = normed.shape
            heads = self.attention.num_heads
            hdim = self.attention.head_dim
            # Reuse the backbone's query projection so the depth stream sees
            # the same query space: (B, S, D) -> (B, H, S, head_dim).
            queries = self.attention.q_proj(normed)
            queries = queries.view(batch, seq_len, heads, hdim).transpose(1, 2)

            depth_out = self.depth_attention(queries, normed, depth_cache)

            # Single residual update: backbone plus scaled depth contribution.
            x = x + backbone_out + self.depth_scale * depth_out

        # Standard pre-norm FFN residual.
        return x + self.ffn(self.ffn_norm(x))

    def get_depth_diagnostics(self, x: torch.Tensor, depth_cache: Optional[torch.Tensor] = None) -> dict:
        """Get diagnostics for monitoring depth attention health."""
        diag = {
            'depth_scale': self.depth_scale.item(),
            'layer_idx': self.layer_idx,
        }
        # Gate diagnostics only make sense when a non-empty depth cache exists.
        if depth_cache is not None and depth_cache.shape[1] > 0:
            diag.update(self.depth_attention.get_diagnostics(self.attn_norm(x)))
        return diag
|
|