"""
Persona Core Vector (PCV) + Plasticity Head: Training Stub

Goals:
- Show where the persona vector p is injected and how an identity regularizer applies
- Provide a PlasticityHead that predicts Δp from the last hidden state plus a meta-signal
- Offer a skeleton training/eval loop with safety guards (EMA snapshot, magnitude threshold)

This is a scaffold; wire it to your real model and data pipeline.
"""

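# Illustrative invocation (the filename pcv_stub.py is an assumption, not fixed by
# this scaffold):
#
#   python pcv_stub.py --steps 10 --seq 128
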
from __future__ import annotations

import argparse
from dataclasses import dataclass

import torch
import torch.nn as nn
import torch.nn.functional as F


@dataclass
class Config:
    d_model: int = 1024
    vocab_size: int = 32000
    max_len: int = 2048
    persona_dim: int = 1024  # must match d_model: p is added directly to token embeddings
    lr: float = 1e-5
    ema_beta: float = 0.999  # decay for the EMA snapshot of p
    delta_max_norm: float = 0.5  # Δp updates with a larger norm trigger a rollback


class TinyBackbone(nn.Module):
    """Placeholder for a Transformer; returns hidden states like a last-layer representation."""

    def __init__(self, cfg: Config):
        super().__init__()
        self.cfg = cfg
        self.tok = nn.Embedding(cfg.vocab_size, cfg.d_model)
        self.proj = nn.Linear(cfg.d_model, cfg.d_model)
        self.ln = nn.LayerNorm(cfg.d_model)
        self.lm_head = nn.Linear(cfg.d_model, cfg.vocab_size, bias=False)

    def forward(self, x_emb: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        # Stand-in for a Transformer stack: projection + tanh + LayerNorm.
        h = self.proj(x_emb)
        h = torch.tanh(h)
        h = self.ln(h)
        logits = self.lm_head(h)
        last = h[:, -1, :]  # last-token hidden state, consumed by the PlasticityHead
        return logits, last


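# A slightly more realistic drop-in backbone, sketched here as an illustration and not
# part of the original scaffold. It keeps TinyBackbone's interface (a `tok` embedding
# plus a forward returning (logits, last hidden state)) so PCVWrapper could use it
# unchanged; nhead/num_layers are assumed values, and no causal mask is shown, which a
# real LM would need.
class TinyTransformerBackbone(nn.Module):
    def __init__(self, cfg: Config, nhead: int = 8, num_layers: int = 2):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=cfg.d_model, nhead=nhead, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.tok = nn.Embedding(cfg.vocab_size, cfg.d_model)
        self.lm_head = nn.Linear(cfg.d_model, cfg.vocab_size, bias=False)

    def forward(self, x_emb: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        h = self.encoder(x_emb)  # (B, S, D) with batch_first=True
        logits = self.lm_head(h)
        return logits, h[:, -1, :]

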
class PCVWrapper(nn.Module):
    """Wraps backbone and injects persona vector p into token embeddings."""

    def __init__(self, cfg: Config):
        super().__init__()
        assert cfg.persona_dim == cfg.d_model, "p is added to token embeddings, so dims must match"
        self.cfg = cfg
        self.backbone = TinyBackbone(cfg)
        self.persona = nn.Parameter(torch.zeros(cfg.persona_dim))

    def forward(self, token_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        # Add p to every token embedding (broadcast over batch and sequence positions).
        base = self.backbone.tok(token_ids)
        x_emb = base + self.persona.view(1, 1, -1)
        logits, last = self.backbone(x_emb)
        return logits, last


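# Quick behavioral check, added for illustration (the helper name is ours, not part of
# the original scaffold): a nonzero persona vector should shift the logits relative to
# the zero-initialized one, confirming where p enters the computation.
def _demo_persona_injection() -> None:
    torch.manual_seed(0)
    cfg = Config(d_model=16, vocab_size=100, persona_dim=16)
    model = PCVWrapper(cfg)
    tokens = torch.randint(0, cfg.vocab_size, (1, 4))
    logits_zero, _ = model(tokens)
    with torch.no_grad():
        model.persona.fill_(0.5)
    logits_shifted, _ = model(tokens)
    assert not torch.allclose(logits_zero, logits_shifted)

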
class PlasticityHead(nn.Module):
    """Predicts Δp (and optionally LN deltas) from last hidden state + meta-signal."""

    def __init__(self, d_model: int):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(d_model * 2, d_model * 2),
            nn.Tanh(),
            nn.Linear(d_model * 2, d_model),
        )

    def forward(self, last_hidden: torch.Tensor, meta: torch.Tensor) -> torch.Tensor:
        # Concatenate the hidden state with the meta-signal, then map to a per-example Δp.
        h = torch.cat([last_hidden, meta], dim=-1)
        delta = self.mlp(h)
        return delta


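# One possible refinement, sketched as an assumption rather than part of the original
# design: bound the head's output with tanh and a fixed scale so Δp is magnitude-
# limited at the source, complementing the norm threshold in the training loop.
class BoundedPlasticityHead(PlasticityHead):
    def __init__(self, d_model: int, max_scale: float = 0.1):
        super().__init__(d_model)
        self.max_scale = max_scale

    def forward(self, last_hidden: torch.Tensor, meta: torch.Tensor) -> torch.Tensor:
        raw = super().forward(last_hidden, meta)
        # tanh keeps each component in (-1, 1); max_scale caps the per-component size.
        return self.max_scale * torch.tanh(raw)

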
def identity_regularizer(p: torch.Tensor, p_target: torch.Tensor, lam: float = 1e-3) -> torch.Tensor:
    """Penalize drift of p away from a reference identity vector (scaled MSE)."""
    return lam * F.mse_loss(p, p_target)


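# A hypothetical alternative regularizer (ours, not part of the original scaffold):
# penalize directional drift of p while leaving its magnitude free, which can matter
# if |p| is expected to grow as the persona strengthens. Note this assumes a nonzero
# p_target; with the zero target used in main() below, cosine similarity is degenerate.
def identity_regularizer_cosine(p: torch.Tensor, p_target: torch.Tensor, lam: float = 1e-3) -> torch.Tensor:
    # 1 - cos(p, p_target) is zero when the directions align; eps guards zero vectors.
    return lam * (1.0 - F.cosine_similarity(p, p_target, dim=0, eps=1e-8))

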
def main():
    ap = argparse.ArgumentParser(description="PCV + Plasticity Head stub")
    ap.add_argument("--steps", type=int, default=5)
    ap.add_argument("--seq", type=int, default=64)
    args = ap.parse_args()

    cfg = Config()
    model = PCVWrapper(cfg)
    ph = PlasticityHead(cfg.d_model)
    opt = torch.optim.AdamW(list(model.parameters()) + list(ph.parameters()), lr=cfg.lr)

    # EMA snapshot of p; serves as the rollback target when a Δp update is rejected.
    ema_p = model.persona.detach().clone()

    B = 2
    vocab = cfg.vocab_size
    p_target = torch.zeros_like(model.persona)

    for step in range(args.steps):
        # Random tokens stand in for real data; replace with your pipeline.
        token_ids = torch.randint(0, vocab, (B, args.seq))
        logits, last = model(token_ids)
        lm_loss = F.cross_entropy(
            logits[:, :-1, :].reshape(-1, vocab),
            token_ids[:, 1:].reshape(-1),
        )

        # Meta-signal is zeroed here; in practice it would carry feedback/uncertainty.
        # Note: `loss` below does not depend on ph, so the head receives no gradient
        # in this stub; a real setup needs an objective on Δp.
        meta = torch.zeros_like(last)
        delta = ph(last, meta).mean(dim=0).detach()

        # Identity regularizer pulls p toward its reference target.
        id_loss = identity_regularizer(model.persona, p_target, lam=1e-3)

        loss = lm_loss + id_loss
        opt.zero_grad()
        loss.backward()
        opt.step()

        # Safety guard: apply Δp (with a small step size) only while its magnitude
        # stays under the threshold...
        if delta.norm().item() <= cfg.delta_max_norm:
            with torch.no_grad():
                model.persona.add_(0.1 * delta)
        else:
            # ...otherwise roll p back to the EMA snapshot.
            with torch.no_grad():
                model.persona.copy_(ema_p)

        # Update the EMA snapshot: ema_p = beta * ema_p + (1 - beta) * p.
        with torch.no_grad():
            ema_p.mul_(cfg.ema_beta).add_(model.persona, alpha=1 - cfg.ema_beta)

        print(f"step={step} loss={loss.item():.4f} | |p|={model.persona.norm().item():.3f}")

    print("Stub complete. Wire this scaffold to your real model/training loop.")


if __name__ == "__main__":
    main()