# gladius-training/kernel/modulator.py
# GLADIUS training package: kernel + omega + synthase + checkpoint (step 529)
"""
GLADIUS v2.0 — Language Modulator
Register Vector (4D): formal↔casual, technical↔simple, concise↔elaborate, warm↔detached
Intent Vector (4D): inform, persuade, comfort, challenge
Silence Gate: scalar 0-1, >threshold = suppress output entirely
Bias Layer: register + intent → vocab-sized logit adjustment
Temporal Gate: time engine modulates ALL outputs — silence, pixel, logit bias
token* = argmax_token S(token | base_logits + register_bias + intent_bias, time)
The modulator reshapes the argmax landscape without replacing knowledge.
Time doesn't just color the input — it gates the output.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from .config import KernelConfig
class Modulator(nn.Module):
    """
    Language modulation system with a temporal output gate.

    Register and intent vectors are derived from the cognitive (hidden)
    state — not from prompt tokens — and applied as an additive logit
    bias: the candidate set is unchanged, only its weighting moves.

    When a temporal embedding is supplied it modulates every output head:
      * silence gate — time decides when to speak (additive push),
      * pixel output — time shifts color (additive RGB shift),
      * bias scale   — time controls how strongly register/intent
                       reshape the logits (multiplicative gain).

    The temporal inversion: time flows forward through the input path,
    then gates the output path — closing the loop.
    """

    @staticmethod
    def _mlp(in_dim: int, mid_dim: int, out_dim: int, final_act: nn.Module) -> nn.Sequential:
        """Two-layer SiLU MLP with a caller-chosen output activation."""
        return nn.Sequential(
            nn.Linear(in_dim, mid_dim),
            nn.SiLU(),
            nn.Linear(mid_dim, out_dim),
            final_act,
        )

    def __init__(self, config: KernelConfig):
        super().__init__()
        self.config = config
        narrow = config.hidden_dim // 4

        # Register vector from hidden state: one value in [-1, 1] per axis
        # (formal↔casual, technical↔simple, concise↔elaborate, warm↔detached).
        self.register_head = self._mlp(config.hidden_dim, narrow, config.register_dim, nn.Tanh())
        # Intent vector from hidden state: a distribution over intents.
        self.intent_head = self._mlp(config.hidden_dim, narrow, config.intent_dim, nn.Softmax(dim=-1))
        # Silence gate: scalar decision in [0, 1].
        self.silence_head = self._mlp(config.hidden_dim, narrow, 1, nn.Sigmoid())
        # Project register + intent into a vocab-sized logit adjustment.
        self.bias_proj = nn.Linear(
            config.register_dim + config.intent_dim,
            config.vocab_size,
            bias=False,
        )
        # Learnable global strength of that adjustment; starts small.
        self.bias_scale = nn.Parameter(torch.tensor(0.1))
        # Pixel head: RGB output constrained to [0, 1].
        self.pixel_head = self._mlp(config.hidden_dim, narrow, 3, nn.Sigmoid())

        # === Temporal Output Gate ===
        # Three learned projections from the temporal embedding:
        #   temporal_silence_mod (D → 1), Tanh in [-1, 1]: additive push on silence
        #   temporal_pixel_mod   (D → 3), Tanh in [-1, 1]: additive RGB shift
        #   temporal_bias_mod    (D → 1), Sigmoid (scaled ×2 in forward):
        #       multiplicative gain in [0, 2] on the logit-bias strength
        tight = config.hidden_dim // 8
        self.temporal_silence_mod = self._mlp(config.hidden_dim, tight, 1, nn.Tanh())
        self.temporal_pixel_mod = self._mlp(config.hidden_dim, tight, 3, nn.Tanh())
        self.temporal_bias_mod = self._mlp(config.hidden_dim, tight, 1, nn.Sigmoid())

        # Zero the final Linear of each temporal gate so a freshly added
        # gate is a no-op (tanh(0) = 0 additive; sigmoid(0) * 2 = 1
        # multiplicative) and existing checkpoints are not disrupted.
        for gate in (self.temporal_silence_mod, self.temporal_pixel_mod, self.temporal_bias_mod):
            nn.init.zeros_(gate[-2].weight)  # Linear just before the activation
            nn.init.zeros_(gate[-2].bias)

    def forward(
        self,
        hidden: torch.Tensor,
        output_head: nn.Linear,
        temporal_embedding: Optional[torch.Tensor] = None,
        modality_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Modulate base logits, silence gate and pixel output.

        Args:
            hidden: (batch, seq_len, hidden_dim) — final layer output.
            output_head: the shared embedding output projection.
            temporal_embedding: (batch, hidden_dim) — from TimeEngine, or
                None to skip temporal gating entirely.
            modality_mask: accepted for interface compatibility; currently
                unused by this module.

        Returns:
            logits: (batch, seq_len, vocab_size) — temporally modulated logits.
            silence: (batch, 1) — temporally gated silence value.
            pixel_output: (batch, 3) — temporally shifted RGB values in [0, 1].
        """
        base_logits = output_head(hidden)  # (B, S, V)

        # Every control signal is derived from the mean-pooled hidden state.
        summary = hidden.mean(dim=1)  # (B, D)
        reg_vec = self.register_head(summary)  # (B, register_dim)
        intent_vec = self.intent_head(summary)  # (B, intent_dim)
        silence = self.silence_head(summary)  # (B, 1)
        pixel_output = self.pixel_head(summary)  # (B, 3) — RGB

        # Register + intent jointly determine the vocab-sized logit bias.
        bias = self.bias_proj(torch.cat([reg_vec, intent_vec], dim=-1))  # (B, V)

        # === Temporal output modulation ===
        if temporal_embedding is None:
            bias = bias * self.bias_scale
        else:
            # Additive temporal push on the silence gate, clamped to [0, 1].
            shift = self.temporal_silence_mod(temporal_embedding)  # (B, 1) in [-1, 1]
            silence = (silence + shift * 0.3).clamp(0, 1)
            # Gentle additive color shift, kept in the valid RGB range.
            tint = self.temporal_pixel_mod(temporal_embedding)  # (B, 3) in [-1, 1]
            pixel_output = (pixel_output + tint * 0.2).clamp(0, 1)
            # Multiplicative gain in [0, 2]: time can amplify or suppress
            # how strongly register/intent reshape the output.
            gain = self.temporal_bias_mod(temporal_embedding) * 2.0  # (B, 1)
            bias = bias * self.bias_scale * gain

        # Broadcast the per-batch bias across the sequence dimension.
        return base_logits + bias.unsqueeze(1), silence, pixel_output