"""
Chess Transformer Model - The "Nuclear Patch" Edition
"""
from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
class ChessConfig(PretrainedConfig):
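    """
    Configuration for the chess transformer.

    Defaults describe a small GPT-style model: 6 layers, 4 heads, 128-dim
    embeddings, a 256-token context window, and an MLP inner size of
    3 * n_embd. Token ids 0-3 are reserved for pad/bos/eos/unk.
    """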
model_type = "chess_transformer"
def __init__(
self,
vocab_size=1200,
n_embd=128,
n_layer=6,
n_head=4,
n_ctx=256,
n_inner=None,
dropout=0.1,
layer_norm_epsilon=1e-5,
tie_weights=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
unk_token_id=3,
**kwargs,
):
self.vocab_size = vocab_size
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_ctx = n_ctx
self.n_inner = n_inner if n_inner is not None else 3 * n_embd
self.dropout = dropout
self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        # Forward the vital token IDs to the parent class via kwargs.
        kwargs["pad_token_id"] = pad_token_id
        kwargs["bos_token_id"] = bos_token_id
        kwargs["eos_token_id"] = eos_token_id
        kwargs["unk_token_id"] = unk_token_id
        # PreTrainedModel.tie_weights() reads config.tie_word_embeddings,
        # so mirror the flag there or the setting has no effect.
        kwargs.setdefault("tie_word_embeddings", tie_weights)
super().__init__(**kwargs)
class MultiHeadAttention(nn.Module):
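    """Causal multi-head self-attention with a precomputed lower-triangular mask."""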
def __init__(self, config: ChessConfig):
super().__init__()
        assert config.n_embd % config.n_head == 0, "n_embd must be divisible by n_head"
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)
        # Causal mask buffer of shape (1, 1, n_ctx, n_ctx); persistent=False
        # keeps it out of the state_dict.
        mask = torch.tril(torch.ones(config.n_ctx, config.n_ctx))
        self.register_buffer("bias", mask.view(1, 1, config.n_ctx, config.n_ctx), persistent=False)
def forward(self, x, attention_mask=None):
        B, T, C = x.size()
        # Single projection to Q, K, V, then split heads: (B, n_head, T, head_dim).
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        q = q.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_head, self.head_dim).transpose(1, 2)
        # Scaled dot-product attention with the causal mask, then the optional padding mask.
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float("-inf"))
        if attention_mask is not None:
            att = att.masked_fill(attention_mask.view(B, 1, 1, T) == 0, float("-inf"))
        att = F.softmax(att, dim=-1)
        att = self.dropout(att)
        # Re-merge heads back to (B, T, C) before the output projection.
        y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)
class FeedForward(nn.Module):
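    """Position-wise MLP: Linear -> GELU -> Linear -> Dropout."""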
def __init__(self, config: ChessConfig):
super().__init__()
self.c_fc = nn.Linear(config.n_embd, config.n_inner)
self.c_proj = nn.Linear(config.n_inner, config.n_embd)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x):
return self.dropout(self.c_proj(F.gelu(self.c_fc(x))))
class TransformerBlock(nn.Module):
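    """Pre-LayerNorm transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x))."""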
def __init__(self, config: ChessConfig):
super().__init__()
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.attn = MultiHeadAttention(config)
self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.mlp = FeedForward(config)
def forward(self, x, attention_mask=None):
x = x + self.attn(self.ln_1(x), attention_mask)
x = x + self.mlp(self.ln_2(x))
return x
class ChessForCausalLM(PreTrainedModel):
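    """
    GPT-style decoder-only LM over chess-move tokens. When called without
    labels, logits for the special tokens are set to -inf (the "nuclear
    patch") so generation can never emit pad/bos/eos/unk.
    """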
config_class = ChessConfig
base_model_prefix = "transformer"
def __init__(self, config: ChessConfig):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
self.drop = nn.Dropout(config.dropout)
self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # post_init() initializes weights and ties lm_head to wte when
        # config.tie_word_embeddings is True; it must run unconditionally.
        self.post_init()
    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
def forward(self, input_ids, attention_mask=None, position_ids=None, labels=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        device = input_ids.device
        b, t = input_ids.size()
        if position_ids is None:
            position_ids = torch.arange(t, device=device).unsqueeze(0)
        # Sum of token embeddings and learned absolute position embeddings.
        x = self.wte(input_ids) + self.wpe(position_ids)
        x = self.drop(x)
        for block in self.h:
            x = block(x, attention_mask)
        x = self.ln_f(x)
        logits = self.lm_head(x)
        if labels is None:
            # The "nuclear patch": at inference time, hard-mask the special
            # tokens so sampling can never pick pad/bos/eos/unk.
            nuclear_bad_ids = [
                self.config.pad_token_id,
                self.config.bos_token_id,
                self.config.eos_token_id,
                self.config.unk_token_id,
            ]
            logits[:, :, nuclear_bad_ids] = float("-inf")
        loss = None
        if labels is not None:
            # Standard next-token objective: shift so position t predicts token t+1,
            # ignoring padding positions in the loss.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
return ((loss,) + (logits,)) if loss is not None else (logits,)
return CausalLMOutputWithPast(loss=loss, logits=logits)
# Register with the Auto* factories so AutoConfig / AutoModelForCausalLM can
# resolve "chess_transformer" to these classes.
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
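
# A minimal smoke-test sketch, not part of the original submission: it builds
# a tiny model from the default config and runs one forward pass. The token
# ids below are arbitrary placeholders, not real chess-move tokens.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    model.eval()

    # Batch of 1, sequence of 8 ids drawn above the reserved special ids (0-3).
    input_ids = torch.randint(4, config.vocab_size, (1, 8))
    attention_mask = torch.ones_like(input_ids)

    # Inference call: special-token logits are nuked to -inf.
    with torch.no_grad():
        out = model(input_ids=input_ids, attention_mask=attention_mask)
    print(out.logits.shape)  # torch.Size([1, 8, 1200])

    # Training-style call: passing labels yields the shifted cross-entropy loss.
    out = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
    print(out.loss)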