"""
GLADIUS v2.0 — Language Modulator
Register Vector (4D): formal↔casual, technical↔simple, concise↔elaborate, warm↔detached
Intent Vector (4D): inform, persuade, comfort, challenge
Silence Gate: scalar 0-1, >threshold = suppress output entirely
Bias Layer: register + intent → vocab-sized logit adjustment
Temporal Gate: time engine modulates ALL outputs — silence, pixel, logit bias
argmax_token S(token | base_logits + register_bias + intent_bias, time)
The modulator reshapes the argmax landscape without replacing knowledge.
Time doesn't just color the input — it gates the output.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from .config import KernelConfig
class Modulator(nn.Module):
    """
    Language modulation system with temporal output gating.

    Register and intent are derived from the cognitive (hidden) state rather
    than from prompt tokens, and enter the output only as an additive logit
    bias — the candidate set is untouched, only its weighting shifts.

    When a temporal embedding is supplied it modulates every output head:

    - Silence gate: time decides when to speak (additive shift).
    - Pixel output: time shifts color (additive RGB offset).
    - Logit bias scale: time controls how strongly register/intent reshape
      the distribution (multiplicative gain).

    The temporal inversion: time flows forward through the input path,
    then gates the output path — closing the loop.
    """

    @staticmethod
    def _two_layer(in_dim: int, mid_dim: int, out_dim: int, final_act: nn.Module) -> nn.Sequential:
        """Small MLP shared by every head: Linear → SiLU → Linear → activation."""
        return nn.Sequential(
            nn.Linear(in_dim, mid_dim),
            nn.SiLU(),
            nn.Linear(mid_dim, out_dim),
            final_act,
        )

    def __init__(self, config: KernelConfig):
        super().__init__()
        self.config = config
        quarter = config.hidden_dim // 4
        eighth = config.hidden_dim // 8

        # Register axes (formal↔casual, technical↔simple, concise↔elaborate,
        # warm↔detached), each squashed to [-1, 1].
        self.register_head = self._two_layer(
            config.hidden_dim, quarter, config.register_dim, nn.Tanh()
        )
        # Intent as a probability distribution (inform/persuade/comfort/challenge).
        self.intent_head = self._two_layer(
            config.hidden_dim, quarter, config.intent_dim, nn.Softmax(dim=-1)
        )
        # Scalar speak/suppress decision in [0, 1].
        self.silence_head = self._two_layer(config.hidden_dim, quarter, 1, nn.Sigmoid())
        # register ⊕ intent → vocab-sized logit adjustment (no bias term).
        self.bias_proj = nn.Linear(
            config.register_dim + config.intent_dim,
            config.vocab_size,
            bias=False,
        )
        # Learnable global strength of the logit bias; starts gentle.
        self.bias_scale = nn.Parameter(torch.tensor(0.1))
        # RGB output, constrained to [0, 1].
        self.pixel_head = self._two_layer(config.hidden_dim, quarter, 3, nn.Sigmoid())

        # === Temporal output gate ===
        # Three learned projections from the temporal embedding:
        #   silence mod: additive push on the silence scalar (Tanh → [-1, 1])
        #   pixel mod:   additive per-channel RGB shift (Tanh → [-1, 1])
        #   bias mod:    multiplicative gain on the logit bias (Sigmoid,
        #                scaled to [0, 2] in forward — amplify or suppress)
        self.temporal_silence_mod = self._two_layer(config.hidden_dim, eighth, 1, nn.Tanh())
        self.temporal_pixel_mod = self._two_layer(config.hidden_dim, eighth, 3, nn.Tanh())
        self.temporal_bias_mod = self._two_layer(config.hidden_dim, eighth, 1, nn.Sigmoid())

        # Zero the final Linear of each temporal gate so a fresh gate is a
        # no-op (tanh(0)=0 for the additive mods, sigmoid(0)*2=1 for the
        # multiplicative one) and existing checkpoints are not disrupted.
        for gate in (self.temporal_silence_mod, self.temporal_pixel_mod, self.temporal_bias_mod):
            final_linear = gate[-2]
            nn.init.zeros_(final_linear.weight)
            nn.init.zeros_(final_linear.bias)

    def forward(
        self,
        hidden: torch.Tensor,
        output_head: nn.Linear,
        temporal_embedding: Optional[torch.Tensor] = None,
        modality_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Args:
            hidden: (batch, seq_len, hidden_dim) — final layer output.
            output_head: the shared embedding output projection.
            temporal_embedding: (batch, hidden_dim) from the TimeEngine, or None.
            modality_mask: accepted for interface compatibility; unused here.

        Returns:
            logits: (batch, seq_len, vocab_size) — temporally modulated logits.
            silence: (batch, 1) — temporally gated silence value in [0, 1].
            pixel_output: (batch, 3) — temporally shifted RGB values in [0, 1].
        """
        base_logits = output_head(hidden)  # (B, S, V)

        # Every control signal comes from the sequence-pooled hidden state,
        # not from individual token positions.
        pooled = hidden.mean(dim=1)  # (B, D)
        register = self.register_head(pooled)      # (B, register_dim)
        intent = self.intent_head(pooled)          # (B, intent_dim)
        silence = self.silence_head(pooled)        # (B, 1)
        pixel_output = self.pixel_head(pooled)     # (B, 3)

        # Register ⊕ intent → vocab-sized additive bias.
        bias = self.bias_proj(torch.cat((register, intent), dim=-1))  # (B, V)

        if temporal_embedding is None:
            bias = bias * self.bias_scale
        else:
            # Time nudges silence: additive shift, re-clamped to a valid range.
            silence = silence + 0.3 * self.temporal_silence_mod(temporal_embedding)
            silence = silence.clamp(0, 1)
            # Time shifts color: gentle additive offset, kept valid RGB.
            pixel_output = pixel_output + 0.2 * self.temporal_pixel_mod(temporal_embedding)
            pixel_output = pixel_output.clamp(0, 1)
            # Time gates bias strength multiplicatively: gain in [0, 2].
            gain = 2.0 * self.temporal_bias_mod(temporal_embedding)  # (B, 1)
            bias = bias * self.bias_scale * gain

        # Broadcast the per-batch bias across the sequence dimension.
        return base_logits + bias.unsqueeze(1), silence, pixel_output