from __future__ import annotations

import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


def precompute_rope_(head_dim: int, max_seq_length: int, base=10000):
    """Precompute per-position rotary angles for interleaved RoPE.

    Returns a tensor of shape (max_seq_length, head_dim) where each adjacent
    (even, odd) pair of channels shares the same angle.
    """
    inv_freq = 1 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))

    t = torch.arange(max_seq_length, dtype=torch.float32)

    # Outer product: one angle per (position, frequency) pair.
    freqs = torch.einsum("i,j->ij", t, inv_freq)

    # Duplicate each frequency so adjacent (even, odd) channels rotate together.
    emb = freqs.repeat_interleave(2, dim=-1)

    return emb


def apply_rope_(x, rope_emb):
    """Apply interleaved RoPE to a tensor of shape (batch, seq, heads, head_dim)."""
    seq_len = x.shape[1]
    rope_emb_sliced = rope_emb[:seq_len, :]

    # Broadcast over batch and head dimensions: (1, seq, 1, head_dim).
    emb = rope_emb_sliced.unsqueeze(0).unsqueeze(2)

    cos = emb.cos()
    sin = emb.sin()

    # Build the rotated partner: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...).
    x_reshaped = x.float().reshape(*x.shape[:-1], -1, 2)
    x_partner = torch.stack([-x_reshaped[..., 1], x_reshaped[..., 0]], dim=-1)
    x_partner = x_partner.flatten(-2)

    return (x * cos + x_partner * sin).type_as(x)
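
# Illustrative sketch (not executed at import time): these standalone helpers are
# not used by the model below, which relies on LlamaRotaryEmbedding instead. A
# quick shape sanity check, assuming a (batch, seq, heads, head_dim) layout:
#
#   rope = precompute_rope_(head_dim=32, max_seq_length=256)
#   x = torch.randn(2, 10, 4, 32)
#   assert apply_rope_(x, rope).shape == x.shape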


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a small model on the order of one
    million parameters. Students can adjust these values to explore different
    architectures.

    Parameter budget breakdown (with default values):
    - Token embeddings: 1200 x 128 = 153,600
    - Rotary position embeddings: 0 (no learned parameters)
    - Transformer layers: 6 x ~165,000 = ~992,000
    - LM head: 153,600 (0 if tie_weights=True, shared with embeddings)
    - Total: ~1.3M parameters (~1.15M with weight tying)

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = False,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights

        # Keep the standard Hugging Face flag in sync so save/load handles tying.
        self.tie_word_embeddings = bool(tie_weights)
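
# Illustrative usage sketch (not executed at import time): the values below are
# arbitrary examples, not recommended settings.
#
#   config = ChessConfig(vocab_size=1200, n_embd=64, n_layer=4, n_head=4)
#   config.n_inner  # -> 192, i.e. 3 * n_embd when n_inner is not given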


class LlamaRotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) module, adapted from the Llama implementation."""

    def __init__(self, config: ChessConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.n_ctx
        self.original_max_seq_len = config.n_ctx

        self.config = config

        rope_init_fn = self.compute_default_rope_parameters
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: ChessConfig | None = None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation.

        Args:
            config (`ChessConfig`):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = 10_000.0
        dim = config.n_embd // config.n_head

        attention_factor = 1.0

        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device).float() / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    def forward(self, x, position_ids):
        # (batch, head_dim // 2, 1) and (batch, 1, seq_len) for a batched outer product.
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
        emb = torch.cat((freqs, freqs), dim=-1)
        cos = emb.cos() * self.attention_scaling
        sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
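
# Shape sketch (illustrative): for hidden states of shape (batch, seq, n_embd)
# and position_ids of shape (batch, seq), the module returns cos and sin of
# shape (batch, seq, head_dim):
#
#   rope = LlamaRotaryEmbedding(ChessConfig())
#   x = torch.randn(2, 10, 128)
#   pos = torch.arange(10).unsqueeze(0).expand(2, -1)
#   cos, sin = rope(x, pos)   # each (2, 10, 32)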


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
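
# Worked sketch (illustrative): with head_dim = 4 at position m, cos/sin expand to
# [cos(m*t0), cos(m*t1), cos(m*t0), cos(m*t1)] (and likewise for sin), so the pair
# (q0, q2) is rotated by angle m*t0 and (q1, q3) by m*t1:
#
#   q_embed = [q0*cos(m*t0) - q2*sin(m*t0),
#              q1*cos(m*t1) - q3*sin(m*t1),
#              q2*cos(m*t0) + q0*sin(m*t0),
#              q3*cos(m*t1) + q1*sin(m*t1)]
#
# Note this pairs dimension i with i + head_dim // 2 (half-split), unlike the
# interleaved pairing used by the standalone apply_rope_ helper near the top.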


class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention module.

    This is a standard scaled dot-product attention implementation
    with causal masking for autoregressive generation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # QK normalization, applied to the full query/key projections before the head split.
        self.q_norm = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.k_norm = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Fused projection for queries, keys, and values.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Causal mask: lower-triangular matrix of shape (1, 1, n_ctx, n_ctx).
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_embeds=None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Project to queries, keys, and values in one matmul, then split.
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        q = self.q_norm(q)
        k = self.k_norm(k)

        # Reshape to (batch, heads, seq, head_dim).
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        cos, sin = position_embeds

        q, k = apply_rotary_pos_emb(q, k, cos, sin)

        # Scaled dot-product attention scores: (batch, heads, seq, seq).
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Causal mask: each position may only attend to itself and earlier positions.
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Optional padding mask of shape (batch, seq) -> (batch, 1, 1, seq).
        if attention_mask is not None:
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, v)

        # Merge heads back: (batch, seq, n_embd).
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        attn_output = self.c_proj(attn_output)

        return attn_output
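
# Illustrative usage sketch: the attention module expects the rotary cos/sin to be
# supplied by the caller (as ChessForCausalLM.forward does below).
#
#   cfg = ChessConfig()
#   attn = MultiHeadAttention(cfg)
#   x = torch.randn(2, 10, cfg.n_embd)
#   pos = torch.arange(10).unsqueeze(0).expand(2, -1)
#   cos, sin = LlamaRotaryEmbedding(cfg)(x, pos)
#   out = attn(x, position_embeds=(cos, sin))   # (2, 10, 128)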


class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (RMSNorm applied before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_embeds=None,
    ) -> torch.Tensor:
        # Pre-norm residual blocks: normalize the input to each sublayer,
        # then add the sublayer output back to the residual stream.
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask, position_embeds=position_embeds)
        x = x + self.mlp(self.ln_2(x))
        return x
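
# Pre-norm residual layout (illustrative): each block computes
#
#   x = x + Attn(RMSNorm(x));  x = x + MLP(RMSNorm(x))
#
# as opposed to the post-norm style RMSNorm(x + Attn(x)), which tends to be
# harder to train without careful warmup.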


class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a GPT-style architecture with:
    - Token embeddings for chess moves
    - Rotary position embeddings (RoPE)
    - Stacked transformer blocks
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True

    keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)

        # Rotary position embeddings (no learned parameters).
        self.wpe = LlamaRotaryEmbedding(config)
        self.drop = nn.Dropout(config.dropout)

        self.h = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.n_layer)
        ])

        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        self.post_init()

        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Tie only when the config asks for it.
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.ones_(module.weight)
            torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.RMSNorm):
            # RMSNorm has a weight but no bias.
            torch.nn.init.ones_(module.weight)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing loss (if labels provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        # Embed tokens; rotary cos/sin are computed once and shared by every block.
        token_embeds = self.wte(input_ids)
        position_embeds = self.wpe(token_embeds, position_ids)
        hidden_states = self.drop(token_embeds)

        for block in self.h:
            hidden_states = block(hidden_states, attention_mask=attention_mask, position_embeds=position_embeds)

        hidden_states = self.ln_f(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)

            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )
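
    # Illustrative training-step sketch: for next-move prediction, labels are
    # typically the input_ids themselves (the shift happens inside forward), with
    # padding positions set to -100 so they are ignored by the loss.
    #
    #   batch = torch.randint(3, 1200, (8, 32))   # (batch, seq) of move IDs
    #   out = model(input_ids=batch, labels=batch)
    #   out.loss.backward()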

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        # Only the logits at the final position are needed.
        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        # Top-k filtering: drop everything below the k-th largest logit.
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Nucleus (top-p) filtering: keep the smallest set of tokens whose
        # cumulative probability exceeds top_p.
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()
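
# Illustrative inference sketch: sample a single next move from a (1, seq_len)
# tensor of move IDs. The IDs here are arbitrary placeholders.
#
#   moves = torch.tensor([[1, 42, 87]])
#   next_id = model.generate_move(moves, temperature=0.8, top_k=10)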


# Register with the Auto classes so AutoConfig / AutoModelForCausalLM can
# instantiate this model from its config.
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
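

# Minimal smoke test (illustrative; uses a randomly initialized model, no
# pretrained weights). Run this file directly to check output shapes.
if __name__ == "__main__":
    cfg = ChessConfig()
    model = ChessForCausalLM(cfg)
    dummy = torch.randint(3, cfg.vocab_size, (2, 16))
    out = model(input_ids=dummy, labels=dummy)
    print("logits:", tuple(out.logits.shape), "loss:", float(out.loss))
    print("params:", sum(p.numel() for p in model.parameters()))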