""" Custom Chess Tokenizer for the Chess Challenge. This tokenizer treats each move as a single token using the extended UCI notation from the Lichess dataset (e.g., WPe2e4, BNg8f6). The dataset format uses: - W/B prefix for White/Black - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King - Source and destination squares (e.g., e2e4) - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling """ from __future__ import annotations import json import os import re from pathlib import Path from typing import Dict, List, Optional from transformers import PreTrainedTokenizer MOVE_RE = re.compile( r"^(?P[WB])(?P[PNBRQK])" r"(?P[a-h][1-8])(?P[a-h][1-8])" r"(?P=([QRBN]))?(?P\\([^()]+\\))?$" ) SUFFIX_TOKENS = ["(x)", "(+)", "(+*)", "(x+)", "(x+*)", "(o)", "(O)"] PROMOTION_TOKENS = ["=Q", "=R", "=B", "=N"] PIECE_TOKENS = [ "WP", "WN", "WB", "WR", "WQ", "WK", "BP", "BN", "BB", "BR", "BQ", "BK", ] class ChessTokenizer(PreTrainedTokenizer): """ A custom tokenizer for chess moves using extended UCI notation. This tokenizer maps each possible chess move to a unique token ID. The vocabulary is built from the training dataset to ensure all moves encountered during training have a corresponding token. Example: >>> tokenizer = ChessTokenizer() >>> tokenizer.encode("WPe2e4 BPe7e5") [1, 42, 87, 2] # [BOS, e2e4, e7e5, EOS] """ model_input_names = ["input_ids", "attention_mask"] vocab_files_names = {"vocab_file": "vocab.json"} # Special tokens PAD_TOKEN = "[PAD]" BOS_TOKEN = "[BOS]" EOS_TOKEN = "[EOS]" UNK_TOKEN = "[UNK]" def __init__( self, vocab_file: Optional[str] = None, vocab: Optional[Dict[str, int]] = None, tokenization_mode: str = "move", **kwargs, ): """ Initialize the chess tokenizer. Args: vocab_file: Path to a JSON file containing the vocabulary mapping. vocab: Dictionary mapping tokens to IDs (alternative to vocab_file). **kwargs: Additional arguments passed to PreTrainedTokenizer. """ # Initialize special tokens self._pad_token = self.PAD_TOKEN self._bos_token = self.BOS_TOKEN self._eos_token = self.EOS_TOKEN self._unk_token = self.UNK_TOKEN # Remove any duplicate special-token entries passed through kwargs # to avoid "multiple values for keyword" errors when loading from disk. kwargs.pop("pad_token", None) kwargs.pop("bos_token", None) kwargs.pop("eos_token", None) kwargs.pop("unk_token", None) # Tokenization mode: # - "move": one token per move (extended UCI) # - "move+suffix": split suffixes into separate tokens # - "factorized": piece/color + from + to (+ promotion/suffix) self.tokenization_mode = tokenization_mode if self.tokenization_mode not in {"move", "move+suffix", "factorized"}: raise ValueError(f"Unknown tokenization_mode: {self.tokenization_mode}") # Load or create vocabulary if vocab is not None: self._vocab = vocab elif vocab_file is not None and os.path.exists(vocab_file): with open(vocab_file, "r", encoding="utf-8") as f: self._vocab = json.load(f) else: # Create a minimal vocabulary with just special tokens # The full vocabulary should be built from the dataset self._vocab = self._create_default_vocab() # Create reverse mapping self._ids_to_tokens = {v: k for k, v in self._vocab.items()} # Call parent init AFTER setting up vocab super().__init__( pad_token=self._pad_token, bos_token=self._bos_token, eos_token=self._eos_token, unk_token=self._unk_token, tokenization_mode=self.tokenization_mode, **kwargs, ) def _create_default_vocab(self) -> Dict[str, int]: """ Create a minimal default vocabulary with just special tokens. 

class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    This tokenizer maps each possible chess move to a unique token ID.
    The vocabulary is built from the training dataset to ensure all moves
    encountered during training have a corresponding token.

    Example (IDs are illustrative; they depend on the built vocabulary):
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [1, 42, 87, 2]  # [BOS, e2e4, e7e5, EOS]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        tokenization_mode: str = "move",
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            tokenization_mode: One of "move", "move+suffix", or "factorized".
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        # Initialize special tokens
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Remove any duplicate special-token entries passed through kwargs
        # to avoid "multiple values for keyword" errors when loading from disk.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Tokenization mode:
        # - "move": one token per move (extended UCI)
        # - "move+suffix": split suffixes into separate tokens
        # - "factorized": piece/color + from + to (+ promotion/suffix)
        self.tokenization_mode = tokenization_mode
        if self.tokenization_mode not in {"move", "move+suffix", "factorized"}:
            raise ValueError(f"Unknown tokenization_mode: {self.tokenization_mode}")

        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Create a minimal vocabulary with just special tokens.
            # The full vocabulary should be built from the dataset.
            self._vocab = self._create_default_vocab()

        # Create reverse mapping
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Call parent init AFTER setting up vocab
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            tokenization_mode=self.tokenization_mode,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just special tokens.

        This minimal vocab is only a placeholder; for the full vocabulary,
        use `build_vocab_from_dataset()`.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab

    @classmethod
    def _build_factorized_vocab(cls) -> Dict[str, int]:
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        squares = [f"{file}{rank}" for file in "abcdefgh" for rank in "12345678"]
        from_tokens = [f"{sq}_f" for sq in squares]
        to_tokens = [f"{sq}_t" for sq in squares]
        vocab_tokens = (
            PIECE_TOKENS + from_tokens + to_tokens + PROMOTION_TOKENS + SUFFIX_TOKENS
        )
        vocab = {token: idx for idx, token in enumerate(special_tokens + vocab_tokens)}
        return vocab

    @classmethod
    def _tokenize_move(cls, move: str, tokenization_mode: str) -> List[str]:
        if tokenization_mode == "move":
            return [move]

        match = MOVE_RE.match(move)
        if not match:
            return [move]

        color = match.group("color")
        piece = match.group("piece")
        from_sq = match.group("from_sq")
        to_sq = match.group("to_sq")
        promo = match.group("promo") or ""
        suffix = match.group("suffix")
        base = f"{color}{piece}{from_sq}{to_sq}{promo}"

        if tokenization_mode == "move+suffix":
            tokens = [base]
            if suffix:
                tokens.append(suffix)
            return tokens

        if tokenization_mode == "factorized":
            tokens = [f"{color}{piece}", f"{from_sq}_f", f"{to_sq}_t"]
            if promo:
                tokens.append(promo)
            if suffix:
                tokens.append(suffix)
            return tokens

        return [move]

    @classmethod
    def _tokenize_text(cls, text: str, tokenization_mode: str) -> List[str]:
        tokens: List[str] = []
        for move in text.strip().split():
            tokens.extend(cls._tokenize_move(move, tokenization_mode))
        return tokens

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
        max_vocab_size: Optional[int] = None,
        tokenization_mode: str = "move",
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.
            max_vocab_size: Optional cap on the number of non-special tokens.
            tokenization_mode: Tokenization mode used to split the games.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        if tokenization_mode == "factorized":
            # The factorized vocabulary is fixed and does not depend on data.
            return cls(vocab=cls._build_factorized_vocab(), tokenization_mode=tokenization_mode)

        token_counts = Counter()
        for game in iterator:
            tokens = cls._tokenize_text(game, tokenization_mode)
            token_counts.update(tokens)

        # Filter by frequency
        tokens = [token for token, count in token_counts.items() if count >= min_frequency]

        if max_vocab_size is not None and len(tokens) > max_vocab_size:
            # Keep the most frequent tokens, tie-broken by token string
            tokens = sorted(tokens, key=lambda t: (-token_counts[t], t))[:max_vocab_size]
        else:
            tokens = sorted(tokens)

        if tokenization_mode == "move+suffix":
            for token in SUFFIX_TOKENS:
                if token not in tokens:
                    tokens.append(token)

        # Build vocabulary
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab, tokenization_mode=tokenization_mode)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_vocab_size: Optional[int] = None,
        max_samples: Optional[int] = 100000,
        tokenization_mode: str = "move",
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_vocab_size: Optional cap on the number of non-special tokens.
            max_samples: Maximum number of samples to process (default: 100k).
            tokenization_mode: Tokenization mode used to split the games.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        if tokenization_mode == "factorized":
            return cls(vocab=cls._build_factorized_vocab(), tokenization_mode=tokenization_mode)

        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(
            game_iterator(),
            min_frequency=min_frequency,
            max_vocab_size=max_vocab_size,
            tokenization_mode=tokenization_mode,
        )

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of move tokens.
        """
        return self._tokenize_text(text, self.tokenization_mode)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def build_inputs_with_special_tokens(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
    ) -> List[int]:
        """Wrap move IDs in BOS/EOS, as shown in the class docstring example."""
        bos, eos = [self.bos_token_id], [self.eos_token_id]
        if token_ids_1 is None:
            return bos + token_ids_0 + eos
        return bos + token_ids_0 + eos + token_ids_1 + eos

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a string."""
        # Filter out special tokens for cleaner output
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
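
# Illustrative sketch (not part of the tokenizer API): how the three
# tokenization modes split the same move. The promotion-with-check move
# "WPe7e8=Q(+)" is a hypothetical sample.
def _demo_tokenization_modes(move: str = "WPe7e8=Q(+)") -> Dict[str, List[str]]:
    """Tokenize one move under each supported mode.

    >>> _demo_tokenization_modes()["move+suffix"]
    ['WPe7e8=Q', '(+)']
    >>> _demo_tokenization_modes()["factorized"]
    ['WP', 'e7_f', 'e8_t', '=Q', '(+)']
    """
    return {
        mode: ChessTokenizer._tokenize_move(move, mode)
        for mode in ("move", "move+suffix", "factorized")
    }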

def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping tokens to their frequencies.
    """
    from collections import Counter

    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)
    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()
    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)

    return dict(token_counts)
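
# Illustrative usage sketch, runnable without downloading the dataset; the
# game strings below are hypothetical samples. Real vocabularies should be
# built with `build_vocab_from_dataset`.
if __name__ == "__main__":
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPd2d4 BNg8f6 WPc2c4 BPe7e6",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1)
    ids = tokenizer.encode("WPe2e4 BPe7e5")
    print("ids:", ids)  # [BOS id] + two move IDs + [EOS id]
    print("decoded:", tokenizer.decode(ids, skip_special_tokens=True))  # "WPe2e4 BPe7e5"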