| | """ |
| | Chess Transformer Model for the Chess Challenge. |
| | |
| | This module provides a simple GPT-style transformer architecture |
| | designed to fit within the 1M parameter constraint. |
| | |
| | Key components: |
| | - ChessConfig: Configuration class for model hyperparameters |
| | - ChessForCausalLM: The main model class for next-move prediction |
| | """ |

from __future__ import annotations

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed to stay well under the 1M parameter budget.
    Students can adjust these values to explore different architectures.

    Parameter budget breakdown (default values, as used by ChessForCausalLM):
    - Token embeddings: 1792 x 128 = 229,376
    - Position embeddings: 0 (rotary embeddings, nothing learned)
    - Shared transformer block: ~189,000 (applied recurrently, so counted once)
    - LM head (with weight tying): 0 (shared with embeddings)
    - Total: ~418,000 parameters

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers (currently unused: ChessForCausalLM
            applies one weight-shared block for a fixed number of recurrent steps).
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1792,
        n_embd: int = 128,
        n_layer: int = 10,
        n_head: int = 4,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights

        # Keep the Hugging Face tying flag in sync with our own option.
        self.tie_word_embeddings = bool(tie_weights)
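
# Illustrative helper (our own addition, not used anywhere by the model): a quick
# way to check the budget described in the ChessConfig docstring against an
# instantiated model, e.g. _count_parameters(ChessForCausalLM(ChessConfig())).
def _count_parameters(model: nn.Module) -> int:
    """Return the number of trainable parameters in ``model``."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)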


class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (scale only, no bias or mean subtraction)."""

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        var = torch.mean(x ** 2, dim=-1, keepdim=True)
        x = x * torch.rsqrt(var + self.eps)
        return self.weight * x


class SwiGLU(nn.Module):
    """SwiGLU feed-forward block: silu(w1(x)) * w2(x), projected back down by w3."""

    def __init__(self, config: ChessConfig):
        super().__init__()
        self.w1 = nn.Linear(config.n_embd, config.n_inner, bias=False)
        self.w2 = nn.Linear(config.n_embd, config.n_inner, bias=False)
        self.w3 = nn.Linear(config.n_inner, config.n_embd, bias=False)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x1 = self.w1(x)
        x2 = self.w2(x)
        hidden = F.silu(x1) * x2
        return self.dropout(self.w3(hidden))


class RotaryEmbedding(nn.Module):
    """Caches cos/sin tables for rotary position embeddings (RoPE)."""

    def __init__(self, head_dim: int, max_position_embeddings: int = 2048, base: float = 10000.0):
        super().__init__()
        self.head_dim = head_dim
        self.max_pos = max_position_embeddings

        inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        self.recompute_cache(max_position_embeddings)

    def recompute_cache(self, max_pos):
        self.max_pos = max_pos
        t = torch.arange(max_pos, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq)

        emb = torch.cat((freqs, freqs), dim=-1)

        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)

    def forward(self, x, seq_len=None):
        # Grow the cached tables on demand if a longer sequence shows up.
        if seq_len > self.max_pos:
            self.recompute_cache(seq_len)

        return (
            self.cos_cached[..., :seq_len, :].to(dtype=x.dtype, device=x.device),
            self.sin_cached[..., :seq_len, :].to(dtype=x.dtype, device=x.device),
        )


def rotate_half(x):
    """Rotate the last dimension by swapping its two halves and negating the second."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_emb(q, k, cos, sin):
    # q: (batch, n_head, seq, head_dim), k: (batch, n_kv_head, seq, head_dim);
    # cos/sin broadcast over the batch and head dimensions.
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class MultiQueryAttention(nn.Module):
    """
    Multi-query self-attention module.

    Standard scaled dot-product attention with causal masking for
    autoregressive generation, except that the n_head query heads share a
    single key/value head, which saves parameters.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        self.c_q = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.c_k = nn.Linear(config.n_embd, self.head_dim, bias=False)
        self.c_v = nn.Linear(config.n_embd, self.head_dim, bias=False)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Lower-triangular causal mask, sliced to the current sequence length in forward.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        freqs_cos: torch.Tensor,
        freqs_sin: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        q = self.c_q(x).view(batch_size, seq_len, self.n_head, self.head_dim)
        k = self.c_k(x).view(batch_size, seq_len, 1, self.head_dim)
        v = self.c_v(x).view(batch_size, seq_len, 1, self.head_dim)

        # (batch, heads, seq, head_dim); the single K/V head broadcasts across all query heads.
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        q, k = apply_rotary_emb(q, k, freqs_cos, freqs_sin)

        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Causal mask: each position may only attend to itself and earlier positions.
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Optional padding mask over key positions.
        if attention_mask is not None:
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, v)

        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        attn_output = self.c_proj(attn_output)

        return attn_output
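

# Illustrative shape check (a sketch using the default ChessConfig; this helper is
# our own addition, not part of the training pipeline). It shows how the single
# shared key/value head broadcasts across all query heads inside the attention matmuls.
def _demo_mqa_shapes() -> None:
    cfg = ChessConfig()
    attn = MultiQueryAttention(cfg)
    rope = RotaryEmbedding(cfg.n_embd // cfg.n_head)
    x = torch.randn(2, 16, cfg.n_embd)  # (batch, seq_len, n_embd)
    cos, sin = rope(x, seq_len=x.size(1))
    out = attn(x, cos, sin)
    assert out.shape == (2, 16, cfg.n_embd)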


class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation, kept as a drop-in
    alternative to SwiGLU (TransformerBlock uses SwiGLU by default).
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.net = nn.Sequential(
            nn.Linear(config.n_embd, config.n_inner, bias=False),
            nn.GELU(),
            nn.Linear(config.n_inner, config.n_embd, bias=False),
            nn.Dropout(config.dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)


class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (RMSNorm before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = RMSNorm(config.n_embd)
        self.attn = MultiQueryAttention(config)
        self.ln_2 = RMSNorm(config.n_embd)
        self.mlp = SwiGLU(config)

    def forward(
        self,
        x: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Residual connections around pre-normalized attention and MLP.
        x = x + self.attn(self.ln_1(x), cos, sin, attention_mask=attention_mask)
        x = x + self.mlp(self.ln_2(x))
        return x


class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a decoder-only architecture with:
    - Token embeddings for chess moves
    - Rotary position embeddings (RoPE)
    - A single weight-shared transformer block applied for several recurrent steps
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True

    keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token embeddings for chess moves.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)

        # Embedding dropout (note: not currently applied in forward).
        self.drop = nn.Dropout(config.dropout)

        # One block whose weights are shared across all recurrent steps
        # (universal-transformer style) instead of a stack of distinct layers.
        self.universal_block = TransformerBlock(config)

        self.ln_f = RMSNorm(config.n_embd)

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Placeholder buffers; the RoPE tables actually used in forward come from self.rotary.
        self.register_buffer("freqs_cos", torch.zeros(1), persistent=False)
        self.register_buffer("freqs_sin", torch.zeros(1), persistent=False)

        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        self.post_init()

        if config.tie_weights:
            self.tie_weights()

        self.rotary = RotaryEmbedding(
            config.n_embd // config.n_head
        )

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, (nn.LayerNorm, RMSNorm)):
            torch.nn.init.ones_(module.weight)
            if getattr(module, "bias", None) is not None:
                torch.nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing loss (if labels provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # position_ids is accepted for interface compatibility; position information
        # is injected through rotary embeddings below, so these ids are not consumed.
        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        hidden_states = self.wte(input_ids)

        cos, sin = self.rotary(hidden_states, hidden_states.size(1))

        if cos.device != hidden_states.device:
            cos, sin = cos.to(hidden_states.device), sin.to(hidden_states.device)

        # Apply the shared block for a fixed number of recurrent steps, collecting
        # the logits of every step for deep supervision.
        all_logits = []

        for step in range(8):
            hidden_states = self.universal_block(hidden_states, cos, sin, attention_mask=attention_mask)
            hidden_states = self.ln_f(hidden_states)
            step_logits = self.lm_head(hidden_states)
            all_logits.append(step_logits)

        # Return the logits of the final recurrent step, i.e. the same tensor the
        # averaged loss below supervises last (avoids re-normalizing twice).
        logits = all_logits[-1]

        loss = None
        if labels is not None:
            # Shift so that position t predicts token t + 1, and average the
            # cross-entropy over all recurrent steps.
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)

            total_loss = 0.0
            for step_logits in all_logits:
                shift_logits = step_logits[..., :-1, :].contiguous()
                total_loss += loss_fct(
                    shift_logits.view(-1, self.config.vocab_size),
                    shift_labels.view(-1)
                )
            loss = total_loss / len(all_logits)

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

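    # Worked form of the objective above (comment added for clarity): with T = 8
    # recurrent steps, the training loss is the average of the per-step losses,
    #     loss = (1 / T) * sum_{t=1..T} CE(logits_t[:, :-1, :], labels[:, 1:]).
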
    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        # Top-k filtering: drop everything below the k-th largest logit.
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Nucleus (top-p) filtering: keep the smallest set of tokens whose
        # cumulative probability exceeds top_p.
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()


# Register with the Auto classes so this model can be instantiated via
# AutoConfig / AutoModelForCausalLM.
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
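

# Minimal smoke test (our own sketch, safe to delete): build the default model,
# check the parameter budget, and sample a next move from a toy sequence.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    print(f"trainable parameters: {_count_parameters(model):,}")

    moves = torch.tensor([[config.bos_token_id, 42, 87]])
    out = model(moves, labels=moves)
    print(f"loss on toy sequence: {out.loss.item():.3f}")
    print(f"sampled next move id: {model.generate_move(moves, top_k=10)}")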