| """ |
| GLADIUS v2.0 — Language Modulator |
| |
| Register Vector (4D): formal↔casual, technical↔simple, concise↔elaborate, warm↔detached |
| Intent Vector (4D): inform, persuade, comfort, challenge |
| Silence Gate: scalar 0-1, >threshold = suppress output entirely |
| Bias Layer: register + intent → vocab-sized logit adjustment |
| Temporal Gate: time engine modulates ALL outputs — silence, pixel, logit bias |
| |
| argmax_token S(token | base_logits + register_bias + intent_bias, time) |
| The modulator reshapes the argmax landscape without replacing knowledge. |
| Time doesn't just color the input — it gates the output. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from typing import Optional |
|
|
| from .config import KernelConfig |
|
|
|
|
class Modulator(nn.Module):
    """
    Language modulation system with temporal output gating.

    Derives register and intent from cognitive state (not from prompt tokens).
    Applies as additive logit bias — same candidates, different weighting.

    When temporal embedding is provided, it modulates ALL output heads:
    - Silence gate: time decides when to speak
    - Pixel output: time shifts color
    - Logit bias scale: time controls how strongly register/intent reshape output

    The temporal inversion: time flows forward through the input path,
    then gates the output path — closing the loop.
    """

    def __init__(self, config: "KernelConfig"):
        super().__init__()
        self.config = config

        # Register head: bounded in (-1, 1) via Tanh — one axis per register
        # dimension (formal↔casual, technical↔simple, concise↔elaborate,
        # warm↔detached per the module docstring).
        self.register_head = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 4),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 4, config.register_dim),
            nn.Tanh(),
        )

        # Intent head: probability simplex over intent classes via Softmax
        # (inform / persuade / comfort / challenge per the module docstring).
        self.intent_head = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 4),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 4, config.intent_dim),
            nn.Softmax(dim=-1),
        )

        # Silence gate: scalar in [0, 1]; the caller compares it against a
        # threshold to suppress output entirely.
        self.silence_head = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 4),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 4, 1),
            nn.Sigmoid(),
        )

        # Projects concatenated (register, intent) control vector to a
        # vocab-sized additive logit bias. bias=False so a zero control
        # vector produces exactly zero logit shift.
        self.bias_proj = nn.Linear(
            config.register_dim + config.intent_dim,
            config.vocab_size,
            bias=False
        )

        # Learnable global strength of the logit bias; starts small (0.1)
        # so modulation begins as a gentle nudge rather than a takeover.
        self.bias_scale = nn.Parameter(torch.tensor(0.1))

        # Pixel head: RGB values in [0, 1] via Sigmoid.
        self.pixel_head = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 4),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 4, 3),
            nn.Sigmoid(),
        )

        # --- Temporal gates: small nets mapping the time embedding to ---
        # --- per-head modulation signals.                              ---

        # Additive shift for the silence gate, in (-1, 1).
        self.temporal_silence_mod = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 8),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 8, 1),
            nn.Tanh(),
        )

        # Additive shift for the RGB output, in (-1, 1) per channel.
        self.temporal_pixel_mod = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 8),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 8, 3),
            nn.Tanh(),
        )

        # Multiplicative scale for the logit bias, in (0, 1) before the
        # ×2 in forward() maps it to (0, 2).
        self.temporal_bias_mod = nn.Sequential(
            nn.Linear(config.hidden_dim, config.hidden_dim // 8),
            nn.SiLU(),
            nn.Linear(config.hidden_dim // 8, 1),
            nn.Sigmoid(),
        )

        # Zero-init the final Linear (index -2; the last module is the
        # activation) of each temporal gate so the temporal path starts as
        # an exact no-op: tanh(0) = 0 → no silence/pixel shift, and
        # sigmoid(0) * 2 = 1 → unit bias scale.
        for mod in [self.temporal_silence_mod, self.temporal_pixel_mod, self.temporal_bias_mod]:
            nn.init.zeros_(mod[-2].weight)
            nn.init.zeros_(mod[-2].bias)

    def forward(
        self,
        hidden: torch.Tensor,
        output_head: nn.Linear,
        temporal_embedding: Optional[torch.Tensor] = None,
        modality_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Args:
            hidden: (batch, seq_len, hidden_dim) — final layer output
            output_head: the shared embedding output projection
            temporal_embedding: (batch, hidden_dim) — from TimeEngine, or None
            modality_mask: (batch, seq_len) — optional; nonzero entries mark
                positions included in the control-vector pooling (assumed
                convention — TODO confirm against caller). None falls back to
                plain mean pooling over all positions.
        Returns:
            logits: (batch, seq_len, vocab_size) — temporally modulated logits
            silence: (batch, 1) — temporally gated silence value
            pixel_output: (batch, 3) — temporally shifted RGB values [0, 1]
        """
        base_logits = output_head(hidden)

        # Pool the sequence into one control vector per batch element.
        # Fix: modality_mask was previously accepted but never used; when
        # provided, use masked mean pooling (None keeps original behavior).
        if modality_mask is None:
            pooled = hidden.mean(dim=1)
        else:
            mask = modality_mask.to(hidden.dtype).unsqueeze(-1)  # (batch, seq, 1)
            denom = mask.sum(dim=1).clamp_min(1.0)  # guard all-masked rows
            pooled = (hidden * mask).sum(dim=1) / denom

        register = self.register_head(pooled)
        intent = self.intent_head(pooled)
        silence = self.silence_head(pooled)
        pixel_output = self.pixel_head(pooled)

        # Register + intent jointly determine the vocab-level logit bias.
        control = torch.cat([register, intent], dim=-1)
        bias = self.bias_proj(control)

        if temporal_embedding is not None:
            # Time nudges the silence gate by at most ±0.3, clamped to [0, 1].
            t_silence = self.temporal_silence_mod(temporal_embedding)
            silence = (silence + t_silence * 0.3).clamp(0, 1)

            # Time shifts each RGB channel by at most ±0.2, clamped to [0, 1].
            t_pixel = self.temporal_pixel_mod(temporal_embedding)
            pixel_output = (pixel_output + t_pixel * 0.2).clamp(0, 1)

            # t_bias in (0, 2): time can mute (→0) or double (→2) how
            # strongly register/intent reshape the logits.
            t_bias = self.temporal_bias_mod(temporal_embedding) * 2.0
            bias = bias * self.bias_scale * t_bias
        else:
            bias = bias * self.bias_scale

        # Broadcast the per-batch bias across the sequence dimension.
        modulated_logits = base_logits + bias.unsqueeze(1)

        return modulated_logits, silence, pixel_output
|
|