| """ |
| GLADIUS v2.0 β Tool Cortex with Grid Primitives |
| |
| Tools as embeddings in the same vector space as vocabulary. |
| Tool activation via cosine similarity threshold. |
| |
| v2.1 Changes: |
| - register_grid_tools() method for ARC grid manipulation primitives |
| - Each tool gets a distinct learned embedding |
| """ |
|
|
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from .config import KernelConfig
|
|
|
|
| class ToolCortex(nn.Module): |
| """ |
| Tool understanding via shared embedding space. |
| |
| Tools live in the same manifold as tokens. When a hidden state |
| is close enough to a tool embedding (cosine sim > threshold), |
| the tool activates. |
| """ |
|
|
| def __init__(self, config: KernelConfig): |
| super().__init__() |
| self.config = config |
|
|
| |
| self.tool_embeddings = nn.Parameter( |
| torch.randn(config.max_tools, config.hidden_dim) * 0.02 |
| ) |
|
|
| |
| self.activation_gate = nn.Sequential( |
| nn.Linear(config.hidden_dim * 2, config.hidden_dim // 2), |
| nn.SiLU(), |
| nn.Linear(config.hidden_dim // 2, 1), |
| nn.Sigmoid(), |
| ) |
|
|
| |
| self.result_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False) |
|
|
| |
| self.register_buffer('tool_active', torch.zeros(config.max_tools, dtype=torch.bool)) |
| self.num_registered = 0 |
|
|
| |
| self.tool_names = {} |
|
|
| def register_tool(self, tool_id: int, description_embedding: torch.Tensor, name: str = ""): |
| """ |
| Register a tool by initializing its embedding. |
| """ |
| with torch.no_grad(): |
| self.tool_embeddings.data[tool_id] = description_embedding |
| self.tool_active[tool_id] = True |
| self.num_registered += 1 |
| if name: |
| self.tool_names[tool_id] = name |
|
|
| def register_grid_tools(self): |
| """ |
| Register grid manipulation primitives for ARC tasks. |
| |
| 6 tools, each initialized with a distinct random seed so they |
| separate in embedding space during training. The model learns |
| WHEN to invoke each tool through gradient signal. |
| |
| Tools: |
| 0: rotate β rotate grid 90/180/270 degrees |
| 1: flip β horizontal or vertical flip |
| 2: tile β repeat/tile a pattern |
| 3: extract β extract a subgrid region |
| 4: fill β fill a region with a color |
| 5: copy β copy a pattern to a new location |
| """ |
| D = self.config.hidden_dim |
| |
| grid_tools = [ |
| (0, "rotate", 42), |
| (1, "flip", 137), |
| (2, "tile", 256), |
| (3, "extract", 512), |
| (4, "fill", 1024), |
| (5, "copy", 2048), |
| ] |
|
|
| for tool_id, name, seed in grid_tools: |
| |
| gen = torch.Generator() |
| gen.manual_seed(seed) |
| embedding = torch.randn(D, generator=gen) * 0.1 |
| |
| |
| |
| angle = tool_id * (3.14159 / len(grid_tools)) |
| for i in range(D): |
| embedding[i] += 0.05 * torch.sin(torch.tensor(angle + i * 0.1)) |
| |
| self.register_tool(tool_id, embedding, name=name) |
|
|
| def check_activation(self, hidden: torch.Tensor) -> torch.Tensor | None: |
| """ |
| Check if any tool should activate based on hidden state similarity. |
| |
| Args: |
| hidden: (batch, seq_len, hidden_dim) |
| Returns: |
| tool_contribution: (batch, seq_len, hidden_dim) or None |
| """ |
| if self.num_registered == 0: |
| return None |
|
|
| |
| pooled = hidden.mean(dim=1) |
|
|
| |
| pooled_norm = F.normalize(pooled, dim=-1) |
| tools_norm = F.normalize(self.tool_embeddings, dim=-1) |
| similarities = torch.matmul(pooled_norm, tools_norm.T) |
|
|
| |
| similarities = similarities.masked_fill(~self.tool_active.unsqueeze(0), -1.0) |
|
|
| |
| best_sim, best_tool = similarities.max(dim=-1) |
|
|
| |
| if best_sim.max().item() < self.config.tool_activation_threshold: |
| return None |
|
|
| |
| tool_embed = self.tool_embeddings[best_tool] |
| |
| |
| combined = torch.cat([pooled, tool_embed], dim=-1) |
| activation_strength = self.activation_gate(combined) |
| |
| |
| contribution = self.result_proj(tool_embed) * activation_strength |
|
|
| |
| return contribution.unsqueeze(1).expand_as(hidden) |
| |
| def get_tool_activations(self, hidden: torch.Tensor) -> dict: |
| """ |
| Debug method: get similarity scores for all registered tools. |
| |
| Returns dict mapping tool_name β similarity_score |
| """ |
| if self.num_registered == 0: |
| return {} |
| |
| pooled = hidden.mean(dim=1) |
| pooled_norm = F.normalize(pooled, dim=-1) |
| tools_norm = F.normalize(self.tool_embeddings, dim=-1) |
| similarities = torch.matmul(pooled_norm, tools_norm.T) |
| |
| result = {} |
| for tool_id, name in self.tool_names.items(): |
| result[name] = similarities[0, tool_id].item() |
| |
| return result |
|
|