# chess-steph-v3 / tokenizer.py
"""
Custom Chess Tokenizer for the Chess Challenge.
This tokenizer treats each move as a single token using the extended UCI notation
from the Lichess dataset (e.g., WPe2e4, BNg8f6).
The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""
from __future__ import annotations
import json
import os
import re
from typing import Dict, List, Optional, Tuple
from transformers import PreTrainedTokenizer
# A single board square, e.g. "e2".
SQUARE_RE = re.compile(r"[a-h][1-8]")
# Plain UCI move with a promotion suffix, e.g. "e7e8q".
UCI_PROMO_RE = re.compile(r"^[a-h][1-8][a-h][1-8]([qrbn])$", re.IGNORECASE)
# SAN-style promotion, e.g. "e8=Q".
EQ_PROMO_RE = re.compile(r"=([QRBNqrbn])")
# Parenthesized promotion, e.g. "(Q)".
PAREN_PROMO_RE = re.compile(r"\(([QRBNqrbn])\)")
PROMOS = {"q", "r", "b", "n"}
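# Illustrative matches for the patterns above (hand-checked against the
# regexes as written; not an exhaustive specification of the dataset):
#   SQUARE_RE.findall("WPe2e4(x)")        -> ["e2", "e4"]
#   UCI_PROMO_RE.match("e7e8q").group(1)  -> "q"
#   EQ_PROMO_RE.search("e8=Q").group(1)   -> "Q"
#   PAREN_PROMO_RE.search("(n)").group(1) -> "n"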

class ChessTokenizer(PreTrainedTokenizer):
    vocab_files_names = {"vocab_file": "vocab.json"}
    model_input_names = ["input_ids", "attention_mask"]

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        # Drop any caller-supplied special tokens so the fixed ones above win.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)
        # Vocab resolution order: explicit dict > vocab file on disk > fixed default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_fixed_vocab()
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
        # The vocab must exist before super().__init__(), which registers the
        # special tokens and resolves them via _convert_token_to_id.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_fixed_vocab(self) -> Dict[str, int]:
        specials = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        # IMPORTANT: deterministic ids matching a1,a2,...,a8,b1,... order
        squares = [f"{f}{r}" for f in "abcdefgh" for r in "12345678"]
        promos = ["q", "r", "b", "n"]
        tokens = specials + squares + promos
        return {tok: i for i, tok in enumerate(tokens)}
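    # Resulting layout (72 tokens total): ids 0-3 are the specials, 4-67 the
    # 64 squares in file-major order, 68-71 the promotion pieces, e.g.:
    #   vocab["[PAD]"] == 0, vocab["a1"] == 4, vocab["h8"] == 67, vocab["n"] == 71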

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _extract_promo_anywhere(self, mv: str) -> Optional[str]:
        """Return the promotion piece (lowercased) from any supported spelling, else None."""
        m = EQ_PROMO_RE.search(mv)
        if m:
            return m.group(1).lower()
        m = PAREN_PROMO_RE.search(mv)
        if m:
            return m.group(1).lower()
        m = UCI_PROMO_RE.match(mv)
        if m:
            return m.group(1).lower()
        return None
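    # Illustrative results, traced against the regexes above:
    #   _extract_promo_anywhere("e7e8q")     -> "q"
    #   _extract_promo_anywhere("e8=Q")      -> "q"
    #   _extract_promo_anywhere("WPe7e8(N)") -> "n"
    #   _extract_promo_anywhere("e2e4")      -> None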

    def _tokenize(self, text: str) -> List[str]:
        """
        Robust tokenization:
        - keeps special tokens ([BOS], etc.) as-is (HF handles them)
        - accepts already-split squares: "e2 e4"
        - accepts UCI concat: "e2e4" -> e2,e4 (+promo)
        - accepts verbose tokens containing squares: "WPe2e4(x+)" -> e2,e4 (+promo)
        """
        tokens: List[str] = []
        for chunk in text.strip().split():
            # already-split square?
            if re.fullmatch(r"[a-h][1-8]", chunk):
                tokens.append(chunk)
                continue
            # promo alone?
            if chunk in PROMOS:
                tokens.append(chunk)
                continue
            # otherwise: extract squares from inside
            squares = SQUARE_RE.findall(chunk)
            if len(squares) >= 2:
                tokens.append(squares[0])
                tokens.append(squares[1])
                promo = self._extract_promo_anywhere(chunk)
                if promo in PROMOS:
                    tokens.append(promo)
            else:
                # allow special tokens to pass through if present
                if chunk in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
                    tokens.append(chunk)
                else:
                    tokens.append(self.UNK_TOKEN)
        return tokens
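    # Illustrative outputs (hand-traced against the logic above):
    #   _tokenize("e2e4 e7e8q")    -> ["e2", "e4", "e7", "e8", "q"]
    #   _tokenize("WPe2e4 BNg8f6") -> ["e2", "e4", "g8", "f6"]
    #   _tokenize("hello")         -> ["[UNK]"]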

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab[self.UNK_TOKEN])

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Reconstruct move strings such as "e2e4 e7e8q" by pairing consecutive
        squares and attaching a trailing promotion token when present.
        """
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        clean = [t for t in tokens if t not in special]
        moves: List[str] = []
        i = 0
        while i < len(clean):
            if re.fullmatch(r"[a-h][1-8]", clean[i]) and i + 1 < len(clean) and re.fullmatch(r"[a-h][1-8]", clean[i + 1]):
                mv = clean[i] + clean[i + 1]
                i += 2
                if i < len(clean) and clean[i] in PROMOS:
                    mv += clean[i]
                    i += 1
                moves.append(mv)
            else:
                moves.append(clean[i])
                i += 1
        return " ".join(moves)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)

# Make AutoTokenizer.from_pretrained(..., trust_remote_code=True) resolve to
# this class when the file ships alongside the model repo.
ChessTokenizer.register_for_auto_class("AutoTokenizer")
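
# Minimal smoke test; a sketch for local runs only, not part of the Hub export.
if __name__ == "__main__":
    tok = ChessTokenizer()
    enc = tok("e2e4 e7e8q")
    print(enc["input_ids"])  # expected: ids for e2, e4, e7, e8, q
    decoded = tok.convert_tokens_to_string(tok.convert_ids_to_tokens(enc["input_ids"]))
    print(decoded)  # expected: "e2e4 e7e8q"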