# chess-stockbird2/model.py: Chess Challenge submission by bouhss
"""
Chess Transformer Model for the Chess Challenge.
Modern small-LLM upgrades:
- RoPE (rotary positional embeddings): no learned positional embeddings needed
- RMSNorm (optional, default True)
- SwiGLU MLP (optional, default True)
- Weight tying (default True)
- Safe loss ignore_index = -100 (HF convention)
"""
from __future__ import annotations
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
class ChessConfig(PretrainedConfig):
model_type = "chess_transformer"
def __init__(
self,
vocab_size: int = 1200,
# Architecture (defaults tuned to be < 1M params for common vocabs)
n_embd: int = 112,
n_layer: int = 7,
n_head: int = 7,
# Context window
n_ctx: int = 512,
# MLP hidden size:
# - if mlp_type="swiglu", this is SwiGLU hidden size h
# - if mlp_type="gelu", this is FFN inner size
n_inner: Optional[int] = 192,
dropout: float = 0.05,
layer_norm_epsilon: float = 1e-6,
# Position encoding
use_rope: bool = True,
rope_theta: float = 10000.0,
# Normalization / MLP type
use_rmsnorm: bool = True,
mlp_type: str = "swiglu", # "swiglu" or "gelu"
# Weight tying
tie_weights: bool = True,
pad_token_id: int = 0,
bos_token_id: int = 1,
eos_token_id: int = 2,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
if n_embd % n_head != 0:
raise ValueError(f"n_embd ({n_embd}) must be divisible by n_head ({n_head})")
head_dim = n_embd // n_head
if use_rope and (head_dim % 2 != 0):
raise ValueError(
f"RoPE requires even head_dim, got head_dim={head_dim}. "
f"Choose n_embd/n_head even."
)
self.vocab_size = vocab_size
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_ctx = n_ctx
self.n_inner = n_inner if n_inner is not None else (2 * n_embd)
self.dropout = dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.use_rope = use_rope
self.rope_theta = rope_theta
self.use_rmsnorm = use_rmsnorm
self.mlp_type = mlp_type
self.tie_weights = tie_weights
# HF uses this field for embedding tying behavior
self.tie_word_embeddings = bool(tie_weights)
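# A rough parameter count with the defaults above (n_embd=112, n_layer=7,
# n_inner=192, vocab_size=1200, SwiGLU + RMSNorm, tied embeddings), as a sanity
# check of the "< 1M params" target; an estimate derived from the layer shapes:
#   token embeddings   1200 * 112                                 = 134,400
#   per block          c_attn 37,968 + c_proj 12,656
#                      + SwiGLU (43,392 + 21,616) + norms 224     = 115,856
#   7 blocks                                                      = 810,992
#   final norm                                                    = 112
#   lm_head (tied to embeddings)                                  = 0
#   total                                                         ~ 945.5K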
class RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
norm = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
return x * norm * self.weight
def rotate_half(x: torch.Tensor) -> torch.Tensor:
x1 = x[..., 0::2]
x2 = x[..., 1::2]
out = torch.empty_like(x)
out[..., 0::2] = -x2
out[..., 1::2] = x1
return out
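# Rotation convention: with the interleaved cos/sin layout built by
# RotaryEmbedding below, each feature pair (x_{2i}, x_{2i+1}) is rotated by the
# position-dependent angle pos * theta_i:
#   x_{2i}'   = x_{2i} * cos(pos * theta_i) - x_{2i+1} * sin(pos * theta_i)
#   x_{2i+1}' = x_{2i} * sin(pos * theta_i) + x_{2i+1} * cos(pos * theta_i)
# rotate_half supplies the (-x_{2i+1}, x_{2i}) terms, so
# q * cos + rotate_half(q) * sin applies the rotation in two elementwise ops.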
class RotaryEmbedding(nn.Module):
"""
RoPE cache builder. Applies RoPE to q,k with shape (B,H,T,D).
"""
def __init__(self, head_dim: int, theta: float = 10000.0):
super().__init__()
if head_dim % 2 != 0:
raise ValueError(f"RoPE requires even head_dim, got {head_dim}")
inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2).float() / head_dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
self._cos_cached = None
self._sin_cached = None
self._seq_len_cached = 0
self._device_cached = None
self._dtype_cached = None
def _build_cache(self, seq_len: int, device: torch.device, dtype: torch.dtype):
t = torch.arange(seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq) # (T, D/2)
cos = freqs.cos().to(dtype=dtype)
sin = freqs.sin().to(dtype=dtype)
self._cos_cached = cos
self._sin_cached = sin
self._seq_len_cached = seq_len
self._device_cached = device
self._dtype_cached = dtype
def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# q,k: (B,H,T,D)
T = q.size(-2)
device = q.device
dtype = q.dtype
if (
self._cos_cached is None
or T > self._seq_len_cached
or device != self._device_cached
or dtype != self._dtype_cached
):
self._build_cache(T, device, dtype)
cos = self._cos_cached[:T] # (T, D/2)
sin = self._sin_cached[:T] # (T, D/2)
# broadcast to (1,1,T,D) via repeat_interleave on last dim
cos = torch.repeat_interleave(cos.unsqueeze(0).unsqueeze(0), 2, dim=-1)
sin = torch.repeat_interleave(sin.unsqueeze(0).unsqueeze(0), 2, dim=-1)
q_out = (q * cos) + (rotate_half(q) * sin)
k_out = (k * cos) + (rotate_half(k) * sin)
return q_out, k_out
class MultiHeadAttention(nn.Module):
def __init__(self, config: ChessConfig):
super().__init__()
self.n_head = config.n_head
self.n_embd = config.n_embd
self.head_dim = config.n_embd // config.n_head
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
self.c_proj = nn.Linear(config.n_embd, config.n_embd)
self.dropout = nn.Dropout(config.dropout)
self.use_rope = bool(config.use_rope)
self.rope = RotaryEmbedding(self.head_dim, theta=config.rope_theta) if self.use_rope else None
# causal mask buffer (expandable)
self.register_buffer(
"bias",
torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(1, 1, config.n_ctx, config.n_ctx),
persistent=False,
)
def _ensure_causal_mask(self, seq_len: int, device: torch.device, dtype: torch.dtype):
if self.bias.size(-1) >= seq_len and self.bias.device == device:
return
self.bias = torch.tril(torch.ones(seq_len, seq_len, device=device, dtype=dtype)).view(1, 1, seq_len, seq_len)
def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
B, T, _ = x.size()
qkv = self.c_attn(x)
q, k, v = qkv.split(self.n_embd, dim=2)
q = q.view(B, T, self.n_head, self.head_dim).transpose(1, 2) # (B,H,T,D)
k = k.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
v = v.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
if self.use_rope:
q, k = self.rope(q, k)
attn = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
self._ensure_causal_mask(T, attn.device, attn.dtype)
causal_mask = self.bias[:, :, :T, :T]
mask_value = torch.finfo(attn.dtype).min
attn = attn.masked_fill(causal_mask == 0, mask_value)
# padding mask (1=keep, 0=mask)
if attention_mask is not None:
am = attention_mask.unsqueeze(1).unsqueeze(2) # (B,1,1,T)
attn = attn.masked_fill(am == 0, mask_value)
attn = F.softmax(attn, dim=-1)
attn = self.dropout(attn)
y = torch.matmul(attn, v) # (B,H,T,D)
y = y.transpose(1, 2).contiguous().view(B, T, self.n_embd)
y = self.c_proj(y)
y = self.dropout(y)
return y
class SwiGLU(nn.Module):
def __init__(self, config: ChessConfig):
super().__init__()
h = config.n_inner
self.w12 = nn.Linear(config.n_embd, 2 * h)
self.w3 = nn.Linear(h, config.n_embd)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x12 = self.w12(x)
x1, x2 = x12.chunk(2, dim=-1)
x = F.silu(x1) * x2
x = self.w3(x)
x = self.dropout(x)
return x
class FeedForwardGELU(nn.Module):
def __init__(self, config: ChessConfig):
super().__init__()
self.c_fc = nn.Linear(config.n_embd, config.n_inner)
self.c_proj = nn.Linear(config.n_inner, config.n_embd)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.c_fc(x)
x = F.gelu(x)
x = self.c_proj(x)
x = self.dropout(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, config: ChessConfig):
super().__init__()
if config.use_rmsnorm:
self.ln_1 = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.ln_2 = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
else:
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.attn = MultiHeadAttention(config)
if config.mlp_type.lower() == "swiglu":
self.mlp = SwiGLU(config)
else:
self.mlp = FeedForwardGELU(config)
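    # Pre-norm residual block: normalize, transform, then add the result back
    # onto the residual stream (attention first, then the MLP).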
def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
x = x + self.mlp(self.ln_2(x))
return x
class ChessForCausalLM(PreTrainedModel):
config_class = ChessConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]
_no_split_modules = ["TransformerBlock"]
def __init__(self, config: ChessConfig):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
# learned positional embeddings only if RoPE disabled
self.wpe = None
if not config.use_rope:
self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
self.drop = nn.Dropout(config.dropout)
self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])
if config.use_rmsnorm:
self.ln_f = RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
else:
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
if config.tie_weights:
self._tied_weights_keys = ["lm_head.weight"]
self.post_init()
if config.tie_weights:
self.tie_weights()
def get_input_embeddings(self) -> nn.Module:
return self.wte
def set_input_embeddings(self, new_embeddings: nn.Module):
self.wte = new_embeddings
if getattr(self.config, "tie_weights", False):
self.tie_weights()
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_output_embeddings(self, new_embeddings: nn.Module):
self.lm_head = new_embeddings
def tie_weights(self):
if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
self._tie_or_clone_weights(self.lm_head, self.wte)
def _init_weights(self, module: nn.Module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Tuple, CausalLMOutputWithPast]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
B, T = input_ids.size()
device = input_ids.device
x = self.wte(input_ids)
if self.wpe is not None:
if position_ids is None:
position_ids = torch.arange(T, device=device).unsqueeze(0).expand(B, -1)
x = x + self.wpe(position_ids)
x = self.drop(x)
for block in self.h:
x = block(x, attention_mask=attention_mask)
x = self.ln_f(x)
logits = self.lm_head(x)
loss = None
if labels is not None:
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1),
)
if not return_dict:
output = (logits,)
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
@torch.no_grad()
def generate_move(
self,
input_ids: torch.LongTensor,
temperature: float = 0.7,
top_k: Optional[int] = 50,
top_p: Optional[float] = None,
) -> int:
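        """Sample and return a single next-token id from the last position's distribution."""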
self.eval()
outputs = self(input_ids)
logits = outputs.logits[:, -1, :] / max(float(temperature), 1e-6)
if top_k is not None and top_k > 0:
k = min(int(top_k), logits.size(-1))
thresh = torch.topk(logits, k)[0][..., -1, None]
logits = logits.masked_fill(logits < thresh, torch.finfo(logits.dtype).min)
        if top_p is not None:
            # Nucleus (top-p) filtering: keep the smallest set of tokens whose
            # cumulative probability exceeds top_p.
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            probs = F.softmax(sorted_logits, dim=-1)
            cum = torch.cumsum(probs, dim=-1)
            to_remove = cum > float(top_p)
            # Shift right so the first token past the threshold is still kept,
            # and never remove the most probable token.
            to_remove[..., 1:] = to_remove[..., :-1].clone()
            to_remove[..., 0] = 0
            # Map the mask back from sorted order to vocabulary order.
            indices_to_remove = to_remove.scatter(dim=-1, index=sorted_indices, src=to_remove)
            logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
probs = F.softmax(logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
return int(next_token.item())
# Register the model with Auto classes
from transformers import AutoConfig, AutoModelForCausalLM
AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
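# ---------------------------------------------------------------------------
# Illustrative smoke test (a minimal sketch, not part of the challenge harness):
# build the default config, check the parameter budget, run one forward pass,
# and sample a token with generate_move. The token ids below are arbitrary
# placeholders, not a real chess move encoding.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"parameters: {n_params:,}")  # ~945.5K with the defaults above
    dummy = torch.randint(3, config.vocab_size, (1, 16))  # avoid pad/bos/eos ids
    out = model(dummy, labels=dummy)
    print(f"loss: {out.loss.item():.3f}, logits shape: {tuple(out.logits.shape)}")
    print(f"sampled next-token id: {model.generate_move(dummy)}")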