# gladius-training / kernel / synthase / synthase_layer.py
# Source: amuzetnoM — GLADIUS training package: kernel + omega + synthase + checkpoint (step 529)
# Commit: 63e99b4 (verified)
"""
GLADIUS β€” Synthase Transformer Layer
Drop-in replacement for TransformerLayer that adds ATP synthase depth attention
as a separate residual stream (peripheral stalk principle).
Architecture:
x_normed = RMSNorm(x)
O_backbone = backbone_attention(x_normed, mask) ← F1 hexamer (stable)
O_depth = synthase_depth(Q, x_normed, depth_cache) ← FO motor (depth)
x = x + O_backbone + depth_scale * O_depth ← peripheral stalk
x = x + FFN(RMSNorm(x))
The backbone attention is UNCHANGED. The depth attention is purely additive.
The backbone can learn to ignore depth (O_depth β†’ 0 via gate).
The depth can learn to contribute (gate β†’ 1, depth_scale grows).
Neither is forced. Both have fair initial conditions.
Authors: Ali A. Shakil, Ava Shakil
Date: March 27, 2026
"""
import torch
import torch.nn as nn
import math
from typing import Optional
from .synthase_attention import SynthaseDepthAttention
class SynthaseTransformerLayer(nn.Module):
    """
    Transformer layer with ATP synthase-inspired depth attention.

    The backbone attention / FFN / norm modules are transferred from an
    existing layer and left untouched; a ``SynthaseDepthAttention`` module is
    added as a second, purely additive residual stream whose contribution is
    scaled by a learnable ``depth_scale`` (initialized small so the backbone
    dominates early in training; it may grow, shrink, or go negative).

    Compatible with GLADIUS Omega kernel (HybridAttention + SwiGLU + RMSNorm).
    """

    def __init__(
        self,
        # Backbone components transferred from the existing layer — NOT new.
        attention: nn.Module,   # HybridAttention (backbone)
        ffn: nn.Module,         # SwiGLU
        attn_norm: nn.Module,   # RMSNorm
        ffn_norm: nn.Module,    # RMSNorm
        # Synthase-specific config (forwarded to SynthaseDepthAttention)
        hidden_dim: int = 640,
        num_heads: int = 20,
        head_dim: int = 32,
        num_depth_kv_heads: int = 4,
        depth_k: int = 32,
        max_depth_layers: int = 14,
        layer_idx: int = 0,
        qk_softcap: Optional[float] = None,
        use_bottleneck: bool = False,
        bottleneck_dim: int = 128,
    ):
        super().__init__()
        self.layer_idx = layer_idx

        # Backbone components (transferred from existing layer — NOT new).
        self.attention = attention
        self.ffn = ffn
        self.attn_norm = attn_norm
        self.ffn_norm = ffn_norm

        # === NEW: Synthase depth attention (the ATP synthase motor) ===
        self.depth_attention = SynthaseDepthAttention(
            hidden_dim=hidden_dim,
            num_heads=num_heads,
            head_dim=head_dim,
            num_depth_kv_heads=num_depth_kv_heads,
            depth_k=depth_k,
            max_depth_layers=max_depth_layers,
            qk_softcap=qk_softcap,
            use_bottleneck=use_bottleneck,
            bottleneck_dim=bottleneck_dim,
        )

        # Depth residual scale — starts at 0.1 (pump mode).
        # Learnable: grows as depth becomes useful (→ production mode).
        # Can go negative (depth as inhibitor — removing info is also useful).
        self.depth_scale = nn.Parameter(torch.tensor(0.1))

    def forward(
        self,
        x: torch.Tensor,
        mask: Optional[torch.Tensor] = None,
        depth_cache: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Forward with separate backbone + depth residual streams.

        Args:
            x: (B, S, D) — input hidden state
            mask: causal mask (passed through to backbone attention)
            depth_cache: (B, D_len, D) — depth cache from DepthCacheBuilder;
                when ``None`` or empty (D_len == 0) the depth stream is
                skipped entirely and only the backbone path runs.

        Returns:
            x: (B, S, D) — output with depth contribution
        """
        x_normed = self.attn_norm(x)

        # === Backbone attention (F1 hexamer — stable, well-trained) ===
        O_backbone = self.attention(x_normed, mask=mask)

        # === Depth attention (FO motor — learning to couple) ===
        if depth_cache is not None and depth_cache.shape[1] > 0:
            # Re-project Q through the backbone's q_proj so depth attention
            # sees UN-ROTATED queries: depth has its own positional encoding
            # (layer index, not sequence position); RoPE is for the
            # sequence axis only.
            B, S, _ = x_normed.shape
            Q = self.attention.q_proj(x_normed)
            Q = Q.view(B, S, self.attention.num_heads, self.attention.head_dim).transpose(1, 2)
            O_depth = self.depth_attention(Q, x_normed, depth_cache)

            # Peripheral stalk: separate residual streams, both additive.
            x = x + O_backbone + self.depth_scale * O_depth
        else:
            x = x + O_backbone

        # FFN (unchanged from the backbone layer)
        x = x + self.ffn(self.ffn_norm(x))
        return x

    def get_depth_diagnostics(self, x: torch.Tensor, depth_cache: Optional[torch.Tensor] = None) -> dict:
        """Get diagnostics for monitoring depth attention health.

        Args:
            x: (B, S, D) — hidden state to probe with
            depth_cache: optional depth cache; when ``None`` or empty, only
                the scalar fields are reported.

        Returns:
            dict with ``depth_scale``, ``layer_idx``, and (when a non-empty
            depth_cache is given) the gate diagnostics from
            ``SynthaseDepthAttention.get_diagnostics``.
        """
        diag = {
            'depth_scale': self.depth_scale.item(),
            'layer_idx': self.layer_idx,
        }
        if depth_cache is not None and depth_cache.shape[1] > 0:
            # Monitoring-only pass: do not build an autograd graph.
            with torch.no_grad():
                x_normed = self.attn_norm(x)
                gate_diag = self.depth_attention.get_diagnostics(x_normed)
            diag.update(gate_diag)
        return diag