"""
GLADIUS v2.0 — Kernel Configuration

Every hyperparameter in one place. No magic numbers anywhere else.
Hardware target: Intel i3-1005G1, 16GB RAM, no GPU.
"""
|
|
from dataclasses import dataclass, field
from typing import Optional
import torch
|
|
|
|
@dataclass
class KernelConfig:
    """Master configuration for the GLADIUS kernel.

    All hyperparameters live here so no magic numbers leak into the rest
    of the codebase.  Cross-field invariants are checked in
    ``__post_init__`` and raise :class:`ValueError` on inconsistency.
    """

    # --- Transformer backbone ---
    vocab_size: int = 32_000
    hidden_dim: int = 256        # must equal num_heads * head_dim (checked below)
    num_layers: int = 6
    num_heads: int = 8
    head_dim: int = 32
    ffn_dim: int = 1024
    max_seq_len: int = 512

    # --- Hot memory ---
    hot_memory_slots: int = 512
    hot_importance_threshold: float = 0.5

    # --- Warm memory (low-rank adapters) ---
    warm_rank: int = 16
    warm_condition_threshold: float = 10.0
    warm_balance_frequency: int = 100
    warm_novelty_threshold: float = 0.1
    warm_checkpoint_interval: int = 300

    # --- Cold memory (retrieval) ---
    cold_embedding_dim: int = 256    # must match hidden_dim (checked below)
    cold_top_k: int = 4

    # --- Time engine ---
    time_dim: int = 32
    time_num_frequencies: int = 16
    time_max_events: int = 64
    clock_mode: str = 'continuous'
    lattice_size: int = 256
    lattice_scales: int = 4

    # --- Cognition module ---
    cognition_state_dim: int = 64
    cognition_modes: int = 4
    cognition_prompt_types: int = 5

    # --- Output modulation ---
    register_dim: int = 4
    intent_dim: int = 4
    silence_threshold: float = 0.7

    # --- Tool cortex ---
    max_tools: int = 64
    tool_activation_threshold: float = 0.6

    # --- Specialist routing ---
    num_specialists: int = 4
    router_top_k: int = 2

    # --- Attention ---
    attention_sparse_budget: int = 64
    attention_alpha_init: float = 0.5

    # --- Optimization ---
    learning_rate: float = 3e-4
    weight_decay: float = 0.01
    warmup_steps: int = 500
    max_grad_norm: float = 1.0
    batch_size: int = 4
    accumulation_steps: int = 8

    # --- Runtime / system ---
    device: str = 'cpu'
    dtype: torch.dtype = torch.float32
    checkpoint_dir: str = 'checkpoints'
    seed: int = 42

    # --- Special token ids ---
    pad_token_id: int = 0
    bos_token_id: int = 1
    eos_token_id: int = 2
    unk_token_id: int = 3

    def __post_init__(self) -> None:
        """Validate cross-field invariants.

        Raises:
            ValueError: if ``hidden_dim != num_heads * head_dim``, or if
                ``cold_embedding_dim != hidden_dim``.
        """
        # Real exceptions rather than `assert`: asserts are stripped under
        # `python -O`, which would silently disable config validation.
        if self.hidden_dim != self.num_heads * self.head_dim:
            raise ValueError(
                f"hidden_dim ({self.hidden_dim}) must equal "
                f"num_heads ({self.num_heads}) * head_dim ({self.head_dim})"
            )
        if self.hidden_dim != self.cold_embedding_dim:
            raise ValueError(
                "cold_embedding_dim must match hidden_dim for direct injection"
            )

    @property
    def estimated_params(self) -> dict:
        """Rough parameter count per component.

        Returns a dict mapping component name -> approximate parameter
        count, plus ``total`` and its size in MB at float32/float16
        (4 and 2 bytes per parameter respectively).
        """
        # Input embedding + output projection (untied), hence the factor 2.
        embed = self.vocab_size * self.hidden_dim * 2
        # Q, K, V, O projections.
        attn_per_layer = 4 * self.hidden_dim * self.hidden_dim
        # Gated FFN: up, gate, down projections.
        ffn_per_layer = 3 * self.hidden_dim * self.ffn_dim
        transformer = self.num_layers * (attn_per_layer + ffn_per_layer)
        # Key + value vectors per slot.
        hot_mem = 2 * self.hot_memory_slots * self.hidden_dim
        # Three rank-`warm_rank` factors per layer.
        warm_mem = self.num_layers * 3 * self.hidden_dim * self.warm_rank
        time_eng = self.time_dim * self.time_num_frequencies * 4
        cognition = self.cognition_state_dim * self.hidden_dim * 4
        modulator = (self.register_dim + self.intent_dim) * self.vocab_size
        tools = self.max_tools * self.hidden_dim * 3
        total = (embed + transformer + hot_mem + warm_mem + time_eng
                 + cognition + modulator + tools)

        return {
            'embeddings': embed,
            'transformer': transformer,
            'hot_memory': hot_mem,
            'warm_memory': warm_mem,
            'time_engine': time_eng,
            'cognition': cognition,
            'modulator': modulator,
            'tool_cortex': tools,
            'total': total,
            'total_MB_f32': total * 4 / 1024 / 1024,
            'total_MB_f16': total * 2 / 1024 / 1024,
        }
|
|
|
|
| |
|
|
def tiny_config() -> KernelConfig:
    """For tests. ~1M params."""
    overrides = dict(
        vocab_size=1000,
        hidden_dim=64,
        num_layers=2,
        num_heads=4,
        head_dim=16,
        ffn_dim=256,
        max_seq_len=128,
        hot_memory_slots=32,
        warm_rank=4,
        max_tools=8,
        num_specialists=2,
        cold_embedding_dim=64,
    )
    return KernelConfig(**overrides)
|
|
def dev_config() -> KernelConfig:
    """For development. ~10M params. Fast iteration on CPU."""
    overrides = dict(
        vocab_size=8000,
        hidden_dim=128,
        num_layers=4,
        num_heads=4,
        head_dim=32,
        ffn_dim=512,
        max_seq_len=256,
        hot_memory_slots=128,
        warm_rank=8,
        max_tools=16,
        num_specialists=2,
        cold_embedding_dim=128,
    )
    return KernelConfig(**overrides)
|
|
def full_config() -> KernelConfig:
    """Target configuration. ~30M params."""
    # All defaults on KernelConfig are the full-size target values.
    config = KernelConfig()
    return config
|
|