| | """ |
| | Custom Chess Tokenizer for the Chess Challenge (Improved Version). |
| | |
| | This tokenizer uses an atomic approach: each move is decomposed into component tokens: |
| | - Side: [W] or [B] |
| | - Piece: [P], [N], [B], [R], [Q], [K] |
| | - Squares: [a1] through [h8] (64 tokens) |
| | - Flags: [x] (capture), [+] (check), [#] (mate), [O-O], [O-O-O] (castling) |
| | - Promotions: [=q], [=r], [=b], [=n] |
| | |
| | This approach reduces vocabulary from ~1200 to 84 tokens, saving ~142K parameters! |
| | |
| | Example: |
| | "WPe2e4" -> ["[W]", "[P]", "[e2]", "[e4]"] |
| | "BNg8f6(x)" -> ["[B]", "[N]", "[g8]", "[f6]", "[x]"] |
| | """ |

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer
|
| |
|
| | |
| | _MOVE_RE = re.compile( |
| | r"^(?P<side>[WB])" |
| | r"(?P<piece>[PNBRQK])" |
| | r"(?P<src>[a-h][1-8])" |
| | r"(?P<dst>[a-h][1-8])" |
| | r"(?P<suffix>.*)$" |
| | ) |
| |
|
| |
|
| | |
| | _PROMO_RE = re.compile(r"=([QRBNqrbn])") |


def _parse_suffix(suffix: str) -> Tuple[bool, bool, bool, Optional[str], Optional[str]]:
    """
    Parse the optional suffix of an encoded move.

    Returns:
        is_capture, is_check, is_mate, castle_kind, promo_piece

    castle_kind: "k" (kingside) or "q" (queenside) or None
    promo_piece: one of "q", "r", "b", "n" or None
    """
    if not suffix:
        return False, False, False, None, None

    suf = suffix.strip()

    is_capture = "x" in suf
    is_check = "+" in suf
    is_mate = "#" in suf  # "#" marks mate, matching the "[#]" flag token

    # Castling markers in the suffix: "(O)" denotes queenside, "(o)" kingside.
    castle_kind = None
    if "(O)" in suf:
        castle_kind = "q"
    elif "(o)" in suf:
        castle_kind = "k"

    promo_piece = None
    m = _PROMO_RE.search(suf)
    if m:
        promo_piece = m.group(1).lower()

    return is_capture, is_check, is_mate, castle_kind, promo_piece
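
# A quick sketch of the suffix parser's behaviour, assuming the suffix
# conventions documented above (e.g. "(x)+" marks a capture giving check):
#   _parse_suffix("(x)")  -> (True, False, False, None, None)
#   _parse_suffix("(x)+") -> (True, True, False, None, None)
#   _parse_suffix("=Q#")  -> (False, False, True, None, "q")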


def _reindex_vocab(vocab: Dict[str, int]) -> Dict[str, int]:
    """Compact token IDs into a contiguous 0..N-1 range, preserving their order."""
    items = sorted(vocab.items(), key=lambda kv: kv[1])
    return {tok: new_id for new_id, (tok, _) in enumerate(items)}
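
# For example (hypothetical IDs): {"[PAD]": 0, "[W]": 4, "[P]": 6} reindexes
# to {"[PAD]": 0, "[W]": 1, "[P]": 2}. In the default vocabulary this closes
# the ID gap left when the duplicate "[B]" (Black side marker and bishop
# piece) collapses to a single entry.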


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using atomic decomposition.

    This tokenizer maps each move component to a token ID. The vocabulary is
    fixed and small (84 tokens), saving parameters. Note that "[B]" does
    double duty as the Black side marker and the bishop piece (its position
    within a move disambiguates the two), which is why the total is 84
    rather than 85.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.encode("WPe2e4 BPe7e5", add_special_tokens=False)
        [4, 5, 44, 46, 7, 5, 49, 47]  # [W], [P], [e2], [e4], [B], [P], [e7], [e5]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Atomic move components
    SIDE_TOKENS = ("[W]", "[B]")
    PIECE_TOKENS = ("[P]", "[N]", "[B]", "[R]", "[Q]", "[K]")

    FLAG_TOKENS = (
        "[x]",
        "[+]",
        "[#]",
        "[O-O]",
        "[O-O-O]",
        # Promotion targets
        "[=q]", "[=r]", "[=b]", "[=n]",
    )

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens so they cannot conflict
        # with the fixed ones passed to super().__init__() below.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocabulary: explicit dict > JSON file > built-in default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        # Compact IDs into a contiguous range (see _reindex_vocab).
        self._vocab = _reindex_vocab(self._vocab)

        # Reverse mapping for decoding.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocabulary must exist before the parent constructor runs,
        # since it may look tokens up while registering the special tokens.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create the atomic vocabulary with component tokens.

        Total: 4 (special) + 2 (sides) + 6 (pieces) + 64 (squares) + 9 (flags)
        = 85 entries, minus 1 for the shared "[B]" (Black side marker and
        bishop piece) = 84 tokens.
        """
        tokens: List[str] = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        tokens += list(self.SIDE_TOKENS)
        tokens += list(self.PIECE_TOKENS)

        # All 64 board squares, file-major: a1..a8, b1..b8, ..., h1..h8.
        for file in "abcdefgh":
            for rank in "12345678":
                tokens.append(f"[{file}{rank}]")

        tokens += list(self.FLAG_TOKENS)

        # The duplicate "[B]" collapses to one entry here, leaving an ID gap
        # that __init__ closes via _reindex_vocab.
        return {tok: idx for idx, tok in enumerate(tokens)}
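
    # Illustration (default vocabulary after reindexing in __init__; these IDs
    # are derived from the construction above, worth checking via get_vocab()):
    #   [PAD]=0, [BOS]=1, [EOS]=2, [UNK]=3, [W]=4, [P]=5, [N]=6, [B]=7,
    #   [R]=8, [Q]=9, [K]=10, [a1]=11, ..., [h8]=74, [x]=75, ..., [=n]=83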

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """Build vocab (not needed for the atomic approach; the vocab is fixed)."""
        return cls()

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 1,
        max_samples: Optional[int] = None,
    ) -> "ChessTokenizer":
        """Build vocab (not needed for the atomic approach; the vocab is fixed)."""
        return cls()

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of atomic tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of atomic move tokens.
        """
        text = (text or "").strip()
        if not text:
            return []

        chunks = text.split()
        out: List[str] = []

        for chunk in chunks:
            # Bare UCI moves ("e2e4", "e7e8q") carry no side/piece prefix.
            if re.fullmatch(r"[a-h][1-8][a-h][1-8][qrbn]?", chunk):
                src = chunk[0:2]
                dst = chunk[2:4]
                out.append(f"[{src}]")
                out.append(f"[{dst}]")
                if len(chunk) == 5 and chunk[4] in "qrbn":
                    out.append(f"[={chunk[4]}]")
                continue

            m = _MOVE_RE.match(chunk)
            if not m:
                out.append(self.UNK_TOKEN)
                continue

            side = "[W]" if m.group("side") == "W" else "[B]"
            piece = m.group("piece")
            src = m.group("src")
            dst = m.group("dst")
            suffix = m.group("suffix") or ""

            out.append(side)
            out.append(f"[{piece}]")
            out.append(f"[{src}]")
            out.append(f"[{dst}]")

            is_cap, is_chk, is_mate, castle_kind, promo = _parse_suffix(suffix)

            # Emit flags in a fixed order: castling, capture, check/mate, promotion.
            if castle_kind == "k":
                out.append("[O-O]")
            elif castle_kind == "q":
                out.append("[O-O-O]")

            if is_cap:
                out.append("[x]")
            if is_mate:
                out.append("[#]")
            elif is_chk:
                out.append("[+]")

            if promo in ("q", "r", "b", "n"):
                out.append(f"[={promo}]")

        return out
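
    # For instance, "WPe7e8(x)=Q+" tokenizes to
    # ["[W]", "[P]", "[e7]", "[e8]", "[x]", "[+]", "[=q]"]
    # (a sketch assuming the suffix conventions documented in _parse_suffix).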

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID, falling back to the UNK token's ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token, falling back to the UNK token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a space-joined string, dropping special tokens."""
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
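

# A minimal smoke test, runnable as a script; the expected values below are a
# sketch derived from the default vocabulary construction above.
if __name__ == "__main__":
    tok = ChessTokenizer()
    assert tok.vocab_size == 84

    ids = tok.encode("WPe2e4 BPe7e5", add_special_tokens=False)
    print(ids)  # expected: [4, 5, 44, 46, 7, 5, 49, 47]

    tokens = tok.convert_ids_to_tokens(ids)
    print(tokens)  # expected: ['[W]', '[P]', '[e2]', '[e4]', '[B]', '[P]', '[e7]', '[e5]']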