| | """ |
| | Custom Chess Tokenizer for the Chess Challenge. |
| | |
| | This tokenizer tokenizes each move into 4 tokens using the extended UCI notation |
| | from the Lichess dataset (e.g., WPe2e4, BNg8f6). |
| | |
| | 4-token scheme per move: |
| | 1) Side: W / B |
| | 2) Piece: P/N/B/R/Q/K |
| | 3) Source square: e2 |
| | 4) Destination square + any suffix (capture/check/mate/promo/castling markers) |
| | """ |

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    It splits each move into 4 tokens and builds a vocabulary from the dataset
    so that training-time tokens have IDs.

    Example moves:
        WPe2e4    -> ["W", "P", "e2", "e4"]
        BNg8f6    -> ["B", "N", "g8", "f6"]
        WPe7e8=Q  -> ["W", "P", "e7", "e8=Q"]  (promotion kept in 4th token)
        WKe1g1(O) -> ["W", "K", "e1", "g1(O)"] (suffix kept in 4th token)
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Capture groups: side, piece, source square, destination square, optional suffix.
    MOVE_RE = re.compile(r"^([WB])([PNBRQK])([a-h][1-8])([a-h][1-8])(.*)$")

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        # Special tokens are fixed for this tokenizer; drop any overrides
        # passed through kwargs so they cannot conflict.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load the vocabulary from an explicit dict, a vocab file, or fall
        # back to a minimal default containing only the special tokens.
        if vocab is not None:
            self._vocab = dict(vocab)
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self.PAD_TOKEN,
            bos_token=self.BOS_TOKEN,
            eos_token=self.EOS_TOKEN,
            unk_token=self.UNK_TOKEN,
            **kwargs,
        )

        # The special tokens must already be in the vocabulary so their IDs
        # stay stable across reloads.
        for tok in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]:
            if tok not in self._vocab:
                raise ValueError(f"Special token {tok} missing from vocab.")

    def _create_default_vocab(self) -> Dict[str, int]:
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {token: idx for idx, token in enumerate(special_tokens)}

    @classmethod
    def _move_to_4tokens(cls, move: str) -> List[str]:
        """
        Convert a move string into exactly 4 subtokens.
        If parsing fails, returns 4x UNK_TOKEN.
        """
        m = cls.MOVE_RE.match(move)
        if not m:
            return [cls.UNK_TOKEN, cls.UNK_TOKEN, cls.UNK_TOKEN, cls.UNK_TOKEN]
        side, piece, src, dst, suffix = m.groups()
        return [side, piece, src, dst + (suffix or "")]
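
    # For example (illustrative): "WQd1h5+" -> ["W", "Q", "d1", "h5+"], while a
    # malformed move such as "e4" falls back to four "[UNK]" tokens.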

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        IMPORTANT: since we tokenize each move into 4 tokens, we must count
        those subtokens here (not the raw full move strings).
        """
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            for move in str(game).strip().split():
                subtokens = cls._move_to_4tokens(move)
                token_counts.update(subtokens)

        # Keep only subtokens that occur at least min_frequency times.
        tokens = [tok for tok, count in token_counts.items() if count >= min_frequency]

        # Sort for a deterministic vocabulary order.
        tokens = sorted(tokens)

        # Special tokens always come first; make sure they are not duplicated.
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        tokens = [t for t in tokens if t not in set(special_tokens)]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)
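
    # Usage sketch (illustrative; assumes whitespace-separated games in the
    # extended UCI notation described above):
    #
    #   games = ["WPe2e4 BPe7e5 WNg1f3 BNb8c6", "WPd2d4 BNg8f6"]
    #   tok = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1)
    #   tok.tokenize("WPe2e4")  # -> ["W", "P", "e2", "e4"]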

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
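
    # Example (illustrative; downloads the dataset on first use):
    #
    #   tok = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000)
    #   print(tok.vocab_size)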

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a space-separated game string into a flat list of subtokens,
        using exactly 4 tokens per move.
        """
        out: List[str] = []
        for move in str(text).strip().split():
            out.extend(self._move_to_4tokens(move))
        return out
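
    # For example (illustrative): "WPe2e4 BPe7e5" tokenizes to
    # ["W", "P", "e2", "e4", "B", "P", "e7", "e5"], i.e. 4 subtokens per move.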

    def _convert_token_to_id(self, token: str) -> int:
        # Unknown subtokens map to the UNK id.
        return self._vocab.get(token, self.unk_token_id)

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert tokens back to a string (space-separated).
        We drop PAD/BOS/EOS; keep UNK for debugging.
        """
        drop = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN}
        return " ".join(t for t in tokens if t not in drop)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
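
    # Save/reload sketch (illustrative; `save_pretrained` calls `save_vocabulary`
    # to write vocab.json, and `from_pretrained` passes it back as `vocab_file`):
    #
    #   tok.save_pretrained("chess_tokenizer")
    #   tok2 = ChessTokenizer.from_pretrained("chess_tokenizer")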


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset.

    NOTE: This counts the 4-subtoken scheme (not whole moves).
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        for move in str(example[column]).strip().split():
            token_counts.update(ChessTokenizer._move_to_4tokens(move))

    return dict(token_counts)
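

if __name__ == "__main__":
    # Minimal smoke test (illustrative): build a tiny vocabulary from a couple
    # of hard-coded games and round-trip one of them through the tokenizer.
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPd2d4 BNg8f6 WPc2c4 BPe7e6",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1)
    print("vocab size:", tokenizer.vocab_size)

    encoded = tokenizer(games[0])
    print("input_ids:", encoded["input_ids"])
    print("decoded:  ", tokenizer.decode(encoded["input_ids"]))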