| | """ |
| | Improved Chess Transformer Model for the Chess Challenge (<1M params). |
| | |
| | Upgrades vs baseline: |
| | - RoPE (rotary positional embeddings) => removes learned position embedding params, better length generalization |
| | - PyTorch SDPA (scaled_dot_product_attention) => faster + stable attention kernels |
| | - SwiGLU MLP => better quality per parameter than GELU MLP |
| | - RMSNorm (optional but recommended) => slightly cheaper / often stable |
| | |
| | Default config aims around ~0.9–0.98M params depending on exact settings. |
| | """ |
| |
|
| | from __future__ import annotations |
| |
|
| | import math |
| | from typing import Optional, Tuple, Union |
| |
|
| | import torch |
| | import torch.nn as nn |
| | import torch.nn.functional as F |
| | from transformers import PretrainedConfig, PreTrainedModel |
| | from transformers.modeling_outputs import CausalLMOutputWithPast |
| |
|
| |
|
| | |
| | |
| | |
class ChessConfig(PretrainedConfig):
    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 160,
        n_layer: int = 3,
        n_head: int = 5,
        n_ctx: int = 256,
        n_inner: Optional[int] = 320,
        dropout: float = 0.1,
        norm_epsilon: float = 1e-6,
        tie_weights: bool = True,
        use_rmsnorm: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        rope_theta: float = 10000.0,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        assert n_embd % n_head == 0, "n_embd must be divisible by n_head"

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 2 * n_embd
        self.dropout = dropout
        self.norm_epsilon = norm_epsilon
        self.tie_weights = tie_weights
        self.use_rmsnorm = use_rmsnorm
        self.rope_theta = rope_theta

        # Let the Hugging Face machinery know the input/output embeddings are tied.
        self.tie_word_embeddings = bool(tie_weights)
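
# Rough parameter budget for the default ChessConfig above (a back-of-the-envelope
# sketch; the exact total depends on the settings used):
#   token embedding   1200 * 160                     = 192,000
#   per block:        attention  160*480 + 160*160   = 102,400
#                     SwiGLU MLP 3 * (160*320)       = 153,600
#                     2 norms    2 * 160             =     320
#   3 blocks          3 * 256,320                    = 768,960
#   final norm                                       =     160
#   lm_head           tied with the token embedding  =       0
#   total                                            ~ 961,000

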
class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Rescale by the reciprocal root-mean-square; no mean subtraction, no bias.
        norm = x.pow(2).mean(dim=-1, keepdim=True).add(self.eps).rsqrt()
        return x * norm * self.weight


def make_norm(config: ChessConfig) -> nn.Module:
    if getattr(config, "use_rmsnorm", True):
        return RMSNorm(config.n_embd, eps=config.norm_epsilon)
    return nn.LayerNorm(config.n_embd, eps=config.norm_epsilon)


class RotaryCache(nn.Module):
    """
    Precomputes cos/sin tables for RoPE up to max_seq_len.
    head_dim must be even for the interleaved rotation.
    """

    def __init__(self, head_dim: int, max_seq_len: int, theta: float = 10000.0):
        super().__init__()
        assert head_dim % 2 == 0, "RoPE requires an even head_dim"

        inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2).float() / head_dim))
        t = torch.arange(max_seq_len).float()
        freqs = torch.einsum("t,f->tf", t, inv_freq)

        # Shapes: (1, 1, max_seq_len, head_dim // 2), broadcastable over (B, H, T, D/2).
        self.register_buffer("cos", freqs.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin", freqs.sin()[None, None, :, :], persistent=False)

    def get(self, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:
        return self.cos[:, :, :seq_len, :], self.sin[:, :, :seq_len, :]


def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    """
    x: (B, H, T, D) with D even
    cos/sin: (1, 1, T, D/2)
    """
    # Rotate interleaved (even, odd) channel pairs by the position-dependent angle.
    x_even = x[..., ::2]
    x_odd = x[..., 1::2]

    out_even = x_even * cos - x_odd * sin
    out_odd = x_even * sin + x_odd * cos

    # Re-interleave so the channel order matches the input layout.
    out = torch.stack((out_even, out_odd), dim=-1).flatten(-2)
    return out
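
# Shape sanity check for the RoPE helpers (illustrative sketch only, not used by the model):
#
#   >>> rope = RotaryCache(head_dim=32, max_seq_len=256)
#   >>> q = torch.randn(2, 5, 128, 32)        # (B, H, T, D)
#   >>> cos, sin = rope.get(128)              # each (1, 1, 128, 16)
#   >>> apply_rope(q, cos, sin).shape
#   torch.Size([2, 5, 128, 32])

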
class MultiHeadAttention(nn.Module):
    def __init__(self, config: ChessConfig):
        super().__init__()
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Fused QKV projection keeps parameter count and kernel launches down.
        self.qkv = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.drop = nn.Dropout(config.dropout)

        self.rope = RotaryCache(
            head_dim=self.head_dim,
            max_seq_len=config.n_ctx,
            theta=getattr(config, "rope_theta", 10000.0),
        )

    def _neg_inf(self, dtype: torch.dtype) -> float:
        # Use a finite sentinel that will not overflow to -inf in half precision.
        if dtype in (torch.float16, torch.bfloat16):
            return -1e4
        return -1e9

    def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        x: (B, T, C)
        attention_mask: (B, T) with 1 for real tokens, 0 for padding
        """
        B, T, C = x.shape

        qkv = self.qkv(x)
        q, k, v = qkv.split(C, dim=-1)

        # (B, T, C) -> (B, H, T, D)
        q = q.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_head, self.head_dim).transpose(1, 2)

        # Rotate queries and keys with RoPE.
        cos, sin = self.rope.get(T)
        cos = cos.to(dtype=q.dtype, device=q.device)
        sin = sin.to(dtype=q.dtype, device=q.device)
        q = apply_rope(q, cos, sin)
        k = apply_rope(k, cos, sin)

        attn_mask = None
        if attention_mask is not None:
            # Additive mask that blocks attention to padding positions.
            pad = (attention_mask == 0)
            pad = pad[:, None, None, :].expand(B, 1, T, T)
            attn_mask = torch.zeros((B, 1, T, T), device=x.device, dtype=x.dtype)
            attn_mask = attn_mask.masked_fill(pad, self._neg_inf(x.dtype))

        if attn_mask is None:
            # No padding: let SDPA apply its built-in causal mask.
            y = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p=self.drop.p if self.training else 0.0,
                is_causal=True,
            )
        else:
            # SDPA does not accept attn_mask together with is_causal=True,
            # so fold the causal constraint into the additive mask.
            causal = torch.ones((T, T), device=x.device, dtype=torch.bool).tril()
            attn_mask = attn_mask.masked_fill(~causal, self._neg_inf(x.dtype))
            y = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask=attn_mask,
                dropout_p=self.drop.p if self.training else 0.0,
                is_causal=False,
            )

        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.proj(y)
        return y


class SwiGLU(nn.Module):
    def __init__(self, config: ChessConfig):
        super().__init__()
        d = config.n_embd
        m = config.n_inner
        self.w1 = nn.Linear(d, m, bias=False)  # gate projection
        self.w2 = nn.Linear(d, m, bias=False)  # value projection
        self.w3 = nn.Linear(m, d, bias=False)  # output projection
        self.drop = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # SwiGLU: silu(W1 x) gates W2 x, then project back to the model dimension.
        return self.drop(self.w3(F.silu(self.w1(x)) * self.w2(x)))


class TransformerBlock(nn.Module):
    def __init__(self, config: ChessConfig):
        super().__init__()
        self.ln_1 = make_norm(config)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = make_norm(config)
        self.mlp = SwiGLU(config)

    def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        x = x + self.mlp(self.ln_2(x))
        return x


class ChessForCausalLM(PreTrainedModel):
    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token embeddings only; positions are handled by RoPE inside attention.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.dropout)

        self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])
        self.ln_f = make_norm(config)

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        self.post_init()

        if config.tie_weights:
            self.tie_weights()

        self.gradient_checkpointing = False

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ChessForCausalLM):
            module.gradient_checkpointing = value

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        # GPT-2-style initialization.
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, (nn.LayerNorm, RMSNorm)):
            # RMSNorm has no bias; guard both attributes so either norm works.
            if hasattr(module, "weight") and module.weight is not None:
                nn.init.ones_(module.weight)
            if hasattr(module, "bias") and module.bias is not None:
                nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        B, T = input_ids.shape
        if T > self.config.n_ctx:
            # RoPE tables are only precomputed up to n_ctx, so keep the most recent tokens.
            input_ids = input_ids[:, -self.config.n_ctx :]
            if attention_mask is not None:
                attention_mask = attention_mask[:, -self.config.n_ctx :]
            T = input_ids.shape[1]

        x = self.wte(input_ids)
        x = self.drop(x)

        if self.gradient_checkpointing and self.training:
            for block in self.h:
                x = torch.utils.checkpoint.checkpoint(block, x, attention_mask, use_reentrant=False)
        else:
            for block in self.h:
                x = block(x, attention_mask=attention_mask)

        x = self.ln_f(x)
        logits = self.lm_head(x)

        loss = None
        if labels is not None:
            # Shift so that position t predicts token t+1.
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            out = (logits,)
            return ((loss,) + out) if loss is not None else out

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        self.eval()
        outputs = self(input_ids=input_ids, attention_mask=attention_mask)
        logits = outputs.logits[:, -1, :] / max(temperature, 1e-6)

        if top_k is not None and top_k > 0:
            # Keep only the top-k logits; everything below the k-th value is masked out.
            kth = torch.topk(logits, k=min(top_k, logits.size(-1)))[0][..., -1, None]
            logits = logits.masked_fill(logits < kth, -1e9)

        if top_p is not None and 0 < top_p < 1:
            # Nucleus sampling: drop the tail of the sorted distribution beyond cumulative prob top_p.
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            probs = F.softmax(sorted_logits, dim=-1)
            cumprobs = torch.cumsum(probs, dim=-1)

            to_remove = cumprobs > top_p
            # Shift right so the first token that crosses the threshold is still kept.
            to_remove[..., 1:] = to_remove[..., :-1].clone()
            to_remove[..., 0] = 0

            # Map the removal flags back to the original (unsorted) vocabulary order.
            remove_indices = to_remove.scatter(dim=-1, index=sorted_indices, src=to_remove)
            logits = logits.masked_fill(remove_indices, -1e9)

        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)
        return next_token.item()


from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
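

# Minimal smoke test (illustrative sketch only): checks the parameter-budget claim in the
# module docstring and exercises one forward pass plus one sampling step on random ids.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)

    n_params = sum(p.numel() for p in model.parameters())
    print(f"parameters: {n_params:,}")  # ~0.96M with the default config

    input_ids = torch.randint(3, config.vocab_size, (1, 32))
    attention_mask = torch.ones_like(input_ids)
    out = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
    print(f"logits: {tuple(out.logits.shape)}  loss: {out.loss.item():.3f}")

    next_id = model.generate_move(input_ids, attention_mask=attention_mask, temperature=1.0, top_k=10)
    print(f"sampled next token id: {next_id}")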