| | """
|
| | Chess Transformer Model for the Chess Challenge.
|
| |
|
| | This module provides a simple GPT-style transformer architecture
|
| | designed to fit within the 1M parameter constraint.
|
| |
|
| | Key components:
|
| | - ChessConfig: Configuration class for model hyperparameters
|
| | - ChessForCausalLM: The main model class for next-move prediction
|
| | """
|
| |
|
| | from __future__ import annotations
|
| |
|
| | import math
|
| | from dataclasses import dataclass
|
| | from typing import Optional, Tuple, Union
|
| |
|
| | import torch
|
| | import torch.nn as nn
|
| | import torch.nn.functional as F
|
| | from transformers import PretrainedConfig, PreTrainedModel
|
| | from transformers.modeling_outputs import CausalLMOutputWithPast
|
| |
|
| |
|
class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) * self.weight

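# RMSNorm rescales activations by their root-mean-square (no mean-centering and
# no bias term), i.e. y = x / sqrt(mean(x^2) + eps) * weight, which is exactly
# what the torch.rsqrt expression above computes.
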
class RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_seq_len=256):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, x, seq_len):
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb[None, :, None, :]

def apply_rotary_emb(q, k, freqs):
    def rotate_half(x):
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)

    q_rot = (q * freqs.cos()) + (rotate_half(q) * freqs.sin())
    k_rot = (k * freqs.cos()) + (rotate_half(k) * freqs.sin())
    return q_rot, k_rot

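# Rotary position embeddings (RoPE) encode position by rotating pairs of
# query/key channels -- channel i with channel i + dim/2 under the rotate_half
# convention used above -- by an angle proportional to the token index, so the
# attention scores end up depending on relative offsets between tokens.
# RotaryEmbedding returns the angles broadcast to (1, seq_len, 1, head_dim),
# matching the (batch, seq_len, n_head, head_dim) layout of q and k before they
# are transposed for the attention matmul.
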
class SwiGLU(nn.Module):
    def __init__(self, dim: int, inner_dim: int, dropout: float):
        super().__init__()
        self.w1 = nn.Linear(dim, inner_dim, bias=False)
        self.w2 = nn.Linear(inner_dim, dim, bias=False)
        self.w3 = nn.Linear(dim, inner_dim, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))

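# SwiGLU feed-forward: out = W2(silu(W1 x) * W3 x). The gated activation uses
# three weight matrices where a plain GELU MLP uses two, which is why ChessConfig
# defaults n_inner to 3 * n_embd rather than the conventional 4x: the smaller
# inner dimension keeps the per-layer parameter count roughly comparable.
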
class ModernAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.n_head = config.n_head
        self.head_dim = config.n_embd // config.n_head

        self.wq = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.wk = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.wv = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.wo = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x, freqs, mask=None):
        bsz, seqlen, _ = x.shape
        q, k, v = self.wq(x), self.wk(x), self.wv(x)

        # (bsz, seqlen, n_head, head_dim)
        q = q.view(bsz, seqlen, self.n_head, self.head_dim)
        k = k.view(bsz, seqlen, self.n_head, self.head_dim)
        v = v.view(bsz, seqlen, self.n_head, self.head_dim)

        q, k = apply_rotary_emb(q, k, freqs)

        # (bsz, n_head, seqlen, seqlen) scaled dot-product scores
        scores = torch.matmul(q.transpose(1, 2), k.transpose(1, 2).transpose(-2, -1)) / math.sqrt(self.head_dim)

        if mask is not None:
            scores = scores + mask[:, :, :seqlen, :seqlen]

        scores = F.softmax(scores.float(), dim=-1).type_as(q)
        output = torch.matmul(scores, v.transpose(1, 2))
        output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
        return self.dropout(self.wo(output))

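# Note: on PyTorch >= 2.0 the matmul / softmax / matmul sequence above could be
# replaced with F.scaled_dot_product_attention(q, k, v, attn_mask=mask) applied
# to the already-transposed (bsz, n_head, seqlen, head_dim) tensors, which fuses
# the same computation. It is kept explicit here so the attention math stays
# readable.
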
class ModernBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = ModernAttention(config)
        self.feed_forward = SwiGLU(config.n_embd, config.n_inner, config.dropout)
        self.attention_norm = RMSNorm(config.n_embd)
        self.ffn_norm = RMSNorm(config.n_embd)

    def forward(self, x, freqs, mask):
        x = x + self.attention(self.attention_norm(x), freqs, mask)
        x = x + self.feed_forward(self.ffn_norm(x))
        return x

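# ModernBlock uses the pre-norm residual layout (normalize, transform, add back
# to the residual stream) common to LLaMA-style models; it is the block that the
# default ChessForCausalLM below stacks n_layer times.
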
class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a ~1M parameter model.
    Students can adjust these values to explore different architectures.

    Approximate parameter breakdown (with the default values below):
    - Token embeddings: 1200 x 128 = 153,600
    - Position embeddings: 0 (rotary embeddings are fixed buffers, not weights)
    - Transformer layers: 6 x ~213,000 = ~1,280,000
    - LM head (with weight tying): 0 (shared with the token embeddings)
    - Total: ~1,433,000 parameters with these defaults, so check the count
      (see the example after this class) and reduce n_layer, n_embd, or n_inner
      if you need to stay under the 1M budget.

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for the normalization layers.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 8,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights

        # Mirror the flag under the name Hugging Face checks when tying weights.
        self.tie_word_embeddings = bool(tie_weights)

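# Example (illustrative): checking a configuration against the parameter budget
# before training. ChessForCausalLM is defined further down in this module.
#
#   config = ChessConfig(n_layer=4)
#   model = ChessForCausalLM(config)
#   n_params = sum(p.numel() for p in model.parameters())
#   print(f"{n_params:,} trainable parameters")
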
class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention module.

    This is a standard scaled dot-product attention implementation
    with causal masking for autoregressive generation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Fused query/key/value projection and output projection.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Lower-triangular causal mask, precomputed up to the context length.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Project to queries, keys, and values in one matmul, then split.
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        # Reshape to (batch, n_head, seq_len, head_dim).
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention scores.
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Causal mask: a position may only attend to itself and earlier positions.
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Optional padding mask: (batch, seq_len) with 1 = attend, 0 = ignore.
        if attention_mask is not None:
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Weighted sum of values, then merge the heads back together.
        attn_output = torch.matmul(attn_weights, v)
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        return self.c_proj(attn_output)

class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x

class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (RMSNorm before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        x = x + self.mlp(self.ln_2(x))
        return x

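# Note: MultiHeadAttention, FeedForward, and TransformerBlock form a classic
# GPT-2 style stack (fused QKV projection, learned biases, GELU MLP). The default
# ChessForCausalLM below is built from ModernBlock layers instead; this stack is
# presumably kept as a simpler alternative to experiment with.
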
class ChessForCausalLM(PreTrainedModel):
    """GPT-style causal language model for next-move prediction."""

    config_class = ChessConfig
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)

        self.rope = RotaryEmbedding(config.n_embd // config.n_head)

        self.drop = nn.Dropout(config.dropout)
        self.h = nn.ModuleList([ModernBlock(config) for _ in range(config.n_layer)])
        self.ln_f = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.post_init()
        if config.tie_weights:
            self.tie_weights()

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Rotary angles for the current sequence length.
        freqs = self.rope(input_ids, seq_len)

        # Additive causal mask: 0 on and below the diagonal, -inf above it.
        mask = torch.full((seq_len, seq_len), float("-inf"), device=device)
        mask = torch.triu(mask, diagonal=1)
        mask = mask.view(1, 1, seq_len, seq_len)

        hidden_states = self.drop(self.wte(input_ids))

        for block in self.h:
            hidden_states = block(hidden_states, freqs, mask)

        hidden_states = self.ln_f(hidden_states)
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that position t predicts the move at position t + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

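    # Greedy decoding sketch (illustrative, not a method of this class): feed the
    # move ids seen so far, take the argmax of the final position's logits,
    # append it, and repeat.
    #
    #   next_id = model(input_ids=ids).logits[:, -1, :].argmax(dim=-1, keepdim=True)
    #   ids = torch.cat([ids, next_id], dim=1)
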
    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, value):
        self.wte = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def tie_weights(self):
        """
        Hugging Face calls this method automatically when
        config.tie_word_embeddings is True.
        """
        self._tie_or_clone_weights(self.lm_head, self.wte)

from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
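

if __name__ == "__main__":
    # Minimal smoke test (illustrative only): build the model with the default
    # configuration, run a forward pass on random move ids, and report the size
    # and loss. The random ids are placeholders, not real encoded games.
    config = ChessConfig()
    model = ChessForCausalLM(config)

    n_params = sum(p.numel() for p in model.parameters())
    print(f"Parameters: {n_params:,}")

    dummy_ids = torch.randint(0, config.vocab_size, (2, 32))
    outputs = model(input_ids=dummy_ids, labels=dummy_ids)
    print(f"Loss on random tokens: {outputs.loss.item():.3f}")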