| """ |
| GLADIUS v2.0 — Shared Embeddings |
| |
| Token embeddings and output projection head. |
| Optionally weight-tied (saves ~8M params at full scale). |
| Tool embeddings live in the same space — see tools.py. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import math |
|
|
| from .config import KernelConfig |
|
|
|
|
class SharedEmbeddings(nn.Module):
    """
    Shared vocabulary embedding layer.

    Tokens, tools, and specialist routing all project into the same
    hidden_dim space. This is what makes tool activation = generation:
    they live in the same manifold.

    Weight tying between the input embedding and the output head is
    controlled by ``config.tie_embeddings`` (defaults to tied when the
    attribute is absent, matching the module docstring's "~8M params
    saved at full scale").
    """

    def __init__(self, config: KernelConfig):
        super().__init__()
        self.config = config

        # Token lookup table. When padding_idx is set, nn.Embedding keeps
        # the pad row's gradient at zero during training.
        self.token_embed = nn.Embedding(
            config.vocab_size, config.hidden_dim,
            padding_idx=config.pad_token_id
        )

        # Hidden → vocabulary logits. No bias, standard for LM heads
        # (a bias term cannot be tied to the embedding anyway).
        self.output_head = nn.Linear(config.hidden_dim, config.vocab_size, bias=False)

        # Optional weight tying: output head shares the embedding matrix.
        # getattr default keeps the original always-tied behavior for
        # configs that predate the flag.
        if getattr(config, "tie_embeddings", True):
            self.output_head.weight = self.token_embed.weight

        # sqrt(d) scaling applied in embed(), per the Transformer
        # convention (Vaswani et al., 2017, §3.4).
        self.scale = math.sqrt(config.hidden_dim)

        self._init_weights()

    def _init_weights(self):
        """Normal(0, 0.02) init (GPT-2 convention); re-zero the pad row."""
        nn.init.normal_(self.token_embed.weight, mean=0.0, std=0.02)
        # Guard against pad_token_id=None: indexing a tensor with None
        # inserts a new axis and returns a view of the WHOLE matrix, so an
        # unconditional weight[pad].zero_() would erase every embedding.
        if self.config.pad_token_id is not None:
            with torch.no_grad():
                self.token_embed.weight[self.config.pad_token_id].zero_()

    def embed(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Token IDs → hidden representations, scaled by sqrt(hidden_dim)."""
        return self.token_embed(input_ids) * self.scale

    def project(self, hidden: torch.Tensor) -> torch.Tensor:
        """Hidden representations → vocabulary logits."""
        return self.output_head(hidden)

    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Convenience alias for :meth:`embed`."""
        return self.embed(input_ids)
|
|