"""
GLADIUS v2.0 — Tool Cortex with Grid Primitives
Tools as embeddings in the same vector space as vocabulary.
Tool activation via cosine similarity threshold.
v2.1 Changes:
- register_grid_tools() method for ARC grid manipulation primitives
- Each tool gets a distinct learned embedding
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import KernelConfig
class ToolCortex(nn.Module):
"""
Tool understanding via shared embedding space.
Tools live in the same manifold as tokens. When a hidden state
is close enough to a tool embedding (cosine sim > threshold),
the tool activates.
"""
def __init__(self, config: KernelConfig):
super().__init__()
self.config = config
# Tool embeddings: same dimension as token embeddings
self.tool_embeddings = nn.Parameter(
torch.randn(config.max_tools, config.hidden_dim) * 0.02
)
# Tool activation gate (refines raw cosine similarity)
self.activation_gate = nn.Sequential(
nn.Linear(config.hidden_dim * 2, config.hidden_dim // 2),
nn.SiLU(),
nn.Linear(config.hidden_dim // 2, 1),
nn.Sigmoid(),
)
# Tool result projection (maps tool output back to hidden space)
self.result_proj = nn.Linear(config.hidden_dim, config.hidden_dim, bias=False)
# Registry metadata (not learned β set at runtime)
self.register_buffer('tool_active', torch.zeros(config.max_tools, dtype=torch.bool))
self.num_registered = 0
# Tool names for debugging/logging
self.tool_names = {}
def register_tool(self, tool_id: int, description_embedding: torch.Tensor, name: str = ""):
"""
Register a tool by initializing its embedding.
"""
with torch.no_grad():
self.tool_embeddings.data[tool_id] = description_embedding
self.tool_active[tool_id] = True
self.num_registered += 1
if name:
self.tool_names[tool_id] = name
def register_grid_tools(self):
"""
Register grid manipulation primitives for ARC tasks.
6 tools, each initialized with a distinct random seed so they
separate in embedding space during training. The model learns
WHEN to invoke each tool through gradient signal.
Tools:
0: rotate β rotate grid 90/180/270 degrees
1: flip β horizontal or vertical flip
2: tile β repeat/tile a pattern
3: extract β extract a subgrid region
4: fill β fill a region with a color
5: copy β copy a pattern to a new location
"""
D = self.config.hidden_dim
grid_tools = [
(0, "rotate", 42),
(1, "flip", 137),
(2, "tile", 256),
(3, "extract", 512),
(4, "fill", 1024),
(5, "copy", 2048),
]
for tool_id, name, seed in grid_tools:
# Distinct initialization per tool using different seeds
gen = torch.Generator()
gen.manual_seed(seed)
embedding = torch.randn(D, generator=gen) * 0.1
# Add a deterministic component based on tool_id to ensure separation
# This creates orthogonal-ish initial directions
angle = tool_id * (3.14159 / len(grid_tools))
for i in range(D):
embedding[i] += 0.05 * torch.sin(torch.tensor(angle + i * 0.1))
self.register_tool(tool_id, embedding, name=name)
def check_activation(self, hidden: torch.Tensor) -> torch.Tensor | None:
"""
Check if any tool should activate based on hidden state similarity.
Args:
hidden: (batch, seq_len, hidden_dim)
Returns:
tool_contribution: (batch, seq_len, hidden_dim) or None
"""
if self.num_registered == 0:
return None
# Pool hidden state
pooled = hidden.mean(dim=1) # (B, D)
# Cosine similarity with all tool embeddings
pooled_norm = F.normalize(pooled, dim=-1)
tools_norm = F.normalize(self.tool_embeddings, dim=-1)
similarities = torch.matmul(pooled_norm, tools_norm.T) # (B, max_tools)
# Mask inactive tools
similarities = similarities.masked_fill(~self.tool_active.unsqueeze(0), -1.0)
# Find best matching tool
best_sim, best_tool = similarities.max(dim=-1) # (B,)
# Check threshold
if best_sim.max().item() < self.config.tool_activation_threshold:
return None
# Tool activated β combine tool embedding with hidden state
tool_embed = self.tool_embeddings[best_tool] # (B, D)
# Refined activation: concatenate pooled + tool, gate the contribution
combined = torch.cat([pooled, tool_embed], dim=-1) # (B, 2D)
activation_strength = self.activation_gate(combined) # (B, 1)
# Only contribute if activation passes threshold
contribution = self.result_proj(tool_embed) * activation_strength # (B, D)
# Broadcast across sequence
return contribution.unsqueeze(1).expand_as(hidden)
def get_tool_activations(self, hidden: torch.Tensor) -> dict:
"""
Debug method: get similarity scores for all registered tools.
Returns dict mapping tool_name β similarity_score
"""
if self.num_registered == 0:
return {}
pooled = hidden.mean(dim=1) # (B, D)
pooled_norm = F.normalize(pooled, dim=-1)
tools_norm = F.normalize(self.tool_embeddings, dim=-1)
similarities = torch.matmul(pooled_norm, tools_norm.T) # (B, max_tools)
result = {}
for tool_id, name in self.tool_names.items():
result[name] = similarities[0, tool_id].item()
return result