# gladius-training/kernel/config_arc.py
# NOTE: the lines below were residue from a Hugging Face repo listing,
# preserved here as a comment so the module parses:
#   uploader: amuzetnoM
#   commit:   63e99b4 (verified)
#   message:  GLADIUS training package: kernel + omega + synthase + checkpoint (step 529)
"""
GLADIUS v2.1 β€” ARC-Ready Kernel Configuration
Extends KernelConfig with ARC competition settings.
"""
import torch
from dataclasses import dataclass, field, fields
@dataclass
class ArcKernelConfig:
    """
    Configuration for the GLADIUS kernel in ARC competition mode.

    Mirrors the base Wyrm KernelConfig field-for-field (so checkpoint
    settings can be copied over, see ``wyrm_to_arc_config``) and adds
    ARC-specific knobs:
    - Expanded vocab (grid tokens)
    - Grid tool activation
    - Specialist residual scaling
    - Context window options

    NOTE(review): field order defines the generated ``__init__``
    positional-argument order — do not reorder fields.
    """
    # === Base Wyrm Settings (from checkpoint) ===
    vocab_size: int = 16022  # 16000 BPE + 22 grid tokens
    hidden_dim: int = 640
    num_layers: int = 14
    num_heads: int = 20
    head_dim: int = 32  # note: num_heads * head_dim = 640 = hidden_dim
    ffn_dim: int = 2560
    max_seq_len: int = 1024  # Start at 1024, can scale to 2048/4096
    # Memory (hot/warm/cold tiers — presumably a tiered memory subsystem;
    # semantics live in the kernel code, not visible here)
    hot_memory_slots: int = 512
    hot_importance_threshold: float = 0.5
    warm_rank: int = 32
    warm_condition_threshold: float = 10.0
    warm_balance_frequency: int = 100
    warm_novelty_threshold: float = 0.1
    warm_checkpoint_interval: int = 300
    cold_embedding_dim: int = 640  # matches hidden_dim
    cold_top_k: int = 4
    # Time (temporal encoding / event clock settings)
    time_dim: int = 64
    time_num_frequencies: int = 16
    time_max_events: int = 64
    clock_mode: str = 'continuous'
    lattice_size: int = 256
    lattice_scales: int = 4
    # Cognition
    cognition_state_dim: int = 128
    cognition_modes: int = 4
    cognition_prompt_types: int = 5
    # Modulator
    register_dim: int = 4
    intent_dim: int = 4
    silence_threshold: float = 0.7
    # Tools
    max_tools: int = 32
    tool_activation_threshold: float = 0.6
    # Router + Specialists
    num_specialists: int = 4
    router_top_k: int = 2
    # Attention
    attention_sparse_budget: int = 64
    attention_alpha_init: float = 0.5
    # Training
    learning_rate: float = 3e-4
    weight_decay: float = 0.01
    warmup_steps: int = 500
    max_grad_norm: float = 1.0
    batch_size: int = 4
    accumulation_steps: int = 8  # effective batch = batch_size * accumulation_steps = 32
    # Device
    device: str = 'cpu'
    dtype: torch.dtype = torch.float32
    # Paths
    checkpoint_dir: str = 'checkpoints'
    seed: int = 42
    # Special tokens
    pad_token_id: int = 0
    bos_token_id: int = 1
    eos_token_id: int = 2
    unk_token_id: int = 3
    # === NEW: ARC-Specific Settings ===
    grid_tools_enabled: bool = True
    specialist_residual_scale: float = 0.1  # Start small, increase as specialists learn
    # Multi-task training LR groups (per-module learning rates)
    backbone_lr: float = 3e-5    # Low — preserve backbone
    specialist_lr: float = 3e-4  # Medium — learn fast
    router_lr: float = 1e-3      # High — learn routing quickly
    tool_lr: float = 3e-4        # Medium — learn tool activation
    # Task mix ratios (sum to 1.0 across the four task streams)
    english_ratio: float = 0.6
    grid_ratio: float = 0.2
    program_ratio: float = 0.1
    tool_ratio: float = 0.1
def wyrm_to_arc_config(wyrm_config) -> ArcKernelConfig:
    """
    Convert a Wyrm KernelConfig to an ARC-ready config.

    Copies every ``ArcKernelConfig`` field that also exists on
    ``wyrm_config``, then forces the ARC-specific settings.

    Args:
        wyrm_config: Any object carrying Wyrm KernelConfig attributes.
            Attributes may be stored in ``__dict__``, ``__slots__``, or
            exposed via properties — all are handled.

    Returns:
        A new ``ArcKernelConfig`` with matching fields copied over and
        the ARC settings (expanded vocab, grid tools, residual scale)
        always applied.
    """
    arc = ArcKernelConfig()
    # Iterate over the target's declared fields rather than
    # vars(wyrm_config): the old approach silently copied nothing for
    # sources using __slots__ or properties (no __dict__ entries).
    for f in fields(arc):
        if f.name == 'vocab_size':
            continue  # never override the expanded (BPE + grid-token) vocab
        if hasattr(wyrm_config, f.name):
            setattr(arc, f.name, getattr(wyrm_config, f.name))
    # Force ARC settings regardless of what the source carried.
    arc.vocab_size = 16022
    arc.grid_tools_enabled = True
    arc.specialist_residual_scale = 0.1
    return arc