| | """ |
| | Chess Move Tokenizer - Component-based approach. |
| | |
| | This tokenizer decomposes chess moves into atomic components for efficient |
| | representation. Each move is broken down into: color, piece type, source square, |
| | destination square, and optional annotations (capture, check, promotion, etc.). |
| | |
| | The vocabulary is built from atomic components rather than full moves, which |
| | allows for better generalization and a smaller vocabulary size. |
| | """ |

from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer

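
# For illustration (using the move encoding shown in this module's examples),
# MOVE_PATTERN below splits one encoded move into named components:
#   "WNg1f3"    -> side "W", piece "N", src "g1", dst "f3", suffix ""
#   "BPe7e8=Q+" -> side "B", piece "P", src "e7", dst "e8", suffix "=Q+"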
MOVE_PATTERN = re.compile(
    r"^(?P<side>[WB])"
    r"(?P<piece>[PNBRQK])"
    r"(?P<src>[a-h][1-8])"
    r"(?P<dst>[a-h][1-8])"
    r"(?P<suffix>.*)$"
)


class ChessTokenizer(PreTrainedTokenizer):
    """
    Component-based chess move tokenizer.

    Instead of treating each complete move as a single token, this tokenizer
    breaks down moves into atomic components (color, piece, squares, annotations).
    This approach results in a much smaller vocabulary while maintaining
    the ability to represent all possible chess moves.

    Example usage:
        >>> tokenizer = ChessTokenizer()
        >>> tokens = tokenizer._tokenize("WPe2e4 BPe7e5")
        >>> # Returns: ['[W]', '[P]', '[e2]', '[e4]', '[B]', '[P]', '[e7]', '[e5]']
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any special-token kwargs so they do not clash with the values
        # passed explicitly to the parent constructor below.
        for token_key in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(token_key, None)

        # The vocabulary must exist before the parent constructor runs, since
        # PreTrainedTokenizer may query the vocabulary during setup.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        # Reverse mapping for ID -> token lookups.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Construct the default component-based vocabulary.

        Creates a vocabulary from atomic chess move components:
        - Special tokens (padding, start, end, unknown)
        - Color indicators (White/Black)
        - Piece types (Pawn, Knight, Bishop, Rook, Queen, King)
        - Board squares (64 squares: a1-h8)
        - Move annotations (capture, check, checkmate, castling, promotions)

        This yields 85 tokens in total (4 + 2 + 6 + 64 + 9).
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        color_tokens = ["[W]", "[B]"]

        # The bishop token is spelled out to avoid colliding with the Black color token "[B]".
        piece_tokens = ["[P]", "[N]", "[BISHOP]", "[R]", "[Q]", "[K]"]

        square_tokens = [f"[{file}{rank}]" for rank in "12345678" for file in "abcdefgh"]

        annotation_tokens = ["[x]", "[+]", "[#]", "[O-O]", "[O-O-O]",
                             "[prom_Q]", "[prom_R]", "[prom_B]", "[prom_N]"]

        all_tokens = special_tokens + color_tokens + piece_tokens + square_tokens + annotation_tokens
        vocab = {token: idx for idx, token in enumerate(all_tokens)}
        return vocab

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        # Tokenize every game with the default component vocabulary and count
        # how often each component actually occurs.
        base_tokenizer = cls()
        counter = Counter()
        for game in iterator:
            counter.update(base_tokenizer._tokenize(game))

        # Special tokens are always kept; other components must reach min_frequency.
        tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        tokens += [token for token, count in counter.most_common()
                   if count >= min_frequency and token not in tokens]
        return cls(vocab={token: idx for idx, token in enumerate(tokens)})

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on the Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        # Stream the selected column through the iterator-based builder.
        return cls.build_vocab_from_iterator(
            (sample[column] for sample in dataset),
            min_frequency=min_frequency,
        )

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Decompose chess moves into component tokens.

        Parses each move and breaks it down into atomic components:
        color, piece, source square, destination square, and annotations.

        Args:
            text: Space-separated sequence of moves in extended UCI format.

        Returns:
            List of component tokens representing the moves.
        """
        token_list: List[str] = []
        move_sequence = text.strip().split()

        for move_str in move_sequence:
            # Queenside castling (checked before "O-O", which is a substring of "O-O-O").
            if "O-O-O" in move_str:
                player_color = "[W]" if move_str.startswith("W") else "[B]"
                token_list.append(player_color)
                token_list.append("[O-O-O]")
                continue

            # Kingside castling.
            if "O-O" in move_str:
                player_color = "[W]" if move_str.startswith("W") else "[B]"
                token_list.append(player_color)
                token_list.append("[O-O]")
                continue

            # Regular moves must match the extended-UCI pattern.
            match = MOVE_PATTERN.match(move_str)
            if not match:
                token_list.append(self.UNK_TOKEN)
                continue

            player_color = "[W]" if match.group("side") == "W" else "[B]"
            piece_type = match.group("piece")
            from_square = match.group("src")
            to_square = match.group("dst")
            move_annotations = match.group("suffix") or ""

            token_list.append(player_color)

            # The bishop uses its spelled-out token to avoid clashing with "[B]" (Black).
            if piece_type == "B":
                token_list.append("[BISHOP]")
            else:
                token_list.append(f"[{piece_type}]")

            token_list.append(f"[{from_square}]")
            token_list.append(f"[{to_square}]")

            # Capture annotation.
            if "x" in move_annotations:
                token_list.append("[x]")
| | if "*" in move_annotations: |
| | token_list.append("[#]") |
| | elif "+" in move_annotations: |
| | token_list.append("[+]") |
| |
|
| | |
| | if "=" in move_annotations: |
| | promo_idx = move_annotations.find("=") |
| | if promo_idx != -1 and promo_idx + 1 < len(move_annotations): |
| | promoted_piece = move_annotations[promo_idx + 1].upper() |
| | if promoted_piece in ("Q", "R", "B", "N"): |
| | token_list.append(f"[prom_{promoted_piece}]") |
| |
|
| | return token_list |

    def _convert_token_to_id(self, token: str) -> int:
        """Map token string to its vocabulary ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Map vocabulary ID back to token string."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Reconstruct string from token list, filtering special tokens."""
        special_token_set = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special_token_set)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Analyze the token frequency distribution in the dataset.

    Useful for understanding which components appear most frequently
    and for vocabulary size planning.

    Args:
        dataset_name: Hugging Face dataset identifier.
        split: Which dataset split to analyze.
        column: Column name containing the game sequences.
        max_samples: Limit the number of samples for faster analysis.

    Returns:
        Frequency dictionary: token -> count.
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    tokenizer = ChessTokenizer()
    frequency_counter = Counter()

    for sample in dataset:
        component_tokens = tokenizer._tokenize(sample[column])
        frequency_counter.update(component_tokens)

    return dict(frequency_counter)
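

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the module's public API):
    # tokenize a short game, round-trip it through token IDs, and save the vocabulary.
    tokenizer = ChessTokenizer()

    game = "WPe2e4 BPe7e5 WNg1f3 BNb8c6"
    print("Tokens:", tokenizer._tokenize(game))

    # encode/decode are inherited from the PreTrainedTokenizer base class and rely
    # on the _tokenize / _convert_* methods defined above.
    ids = tokenizer.encode(game, add_special_tokens=False)
    print("IDs:", ids)
    print("Decoded:", tokenizer.decode(ids))

    # The output directory name below is just an example.
    print("Saved vocabulary:", tokenizer.save_vocabulary("./chess_tokenizer_vocab")[0])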