# chess_gasp/tokenizer.py
"""
Custom Chess Tokenizer for the Chess Challenge.
This tokenizer uses a compact hybrid scheme optimized for small models:
- Frequent moves are single tokens (e.g., WPe2e4) once the vocabulary has been built from a dataset.
- Rare moves fall back to two tokens: piece+from (e.g., WPe2) and to-square (e.g., e4).
- Promotions add a third token (q/r/b/n).
The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling, =Q/=R/=B/=N for promotion
"""
from __future__ import annotations
import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
"""
A custom tokenizer for chess moves using extended UCI notation.
This tokenizer uses a compact base vocabulary (piece+from, to-square,
promotion tokens) and optionally adds frequent full-move tokens for
shorter sequences and better sample efficiency.
    Example (with the default vocabulary, where every move is two tokens):
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.tokenize("WPe2e4 BPe7e5")
        ['WPe2', 'e4', 'BPe7', 'e5']

    With a vocabulary built via `build_vocab_from_dataset()`, frequent moves
    such as WPe2e4 map to a single token; the exact IDs depend on the vocabulary.
    """
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
_MOVE_RE = re.compile(
r"^(?P<color>[WB])(?P<piece>[PNBRQK])(?P<from>[a-h][1-8])(?P<to>[a-h][1-8])(?P<rest>.*)$"
)
_PROMO_RE = re.compile(r"=([NBRQnrbq])")
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
"""
Initialize the chess tokenizer.
Args:
vocab_file: Path to a JSON file containing the vocabulary mapping.
vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
**kwargs: Additional arguments passed to PreTrainedTokenizer.
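        Example (illustrative):
            >>> tok = ChessTokenizer()                          # compact default vocabulary
            >>> tok = ChessTokenizer(vocab_file="vocab.json")   # load a saved vocabulary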
"""
# Initialize special tokens
self._pad_token = self.PAD_TOKEN
self._bos_token = self.BOS_TOKEN
self._eos_token = self.EOS_TOKEN
self._unk_token = self.UNK_TOKEN
# Remove any duplicate special-token entries passed through kwargs
# to avoid "multiple values for keyword" errors when loading from disk.
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
# Load or create vocabulary
if vocab is not None:
self._vocab = vocab
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
# Create a compact default vocabulary that can tokenize any move
self._vocab = self._create_default_vocab()
# Create reverse mapping
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
# Call parent init AFTER setting up vocab
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
def _create_default_vocab(self) -> Dict[str, int]:
"""
Create a compact default vocabulary with full move coverage.
For better compression, use `build_vocab_from_dataset()` to add
frequent full-move tokens.
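        The default vocabulary contains 4 special tokens, 768 piece+from tokens
        (2 colors x 6 pieces x 64 squares), 64 to-square tokens and 4 promotion
        tokens, i.e. 840 entries in total.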
"""
tokens = self._create_base_vocab_tokens()
return {token: idx for idx, token in enumerate(tokens)}
@classmethod
def _create_base_vocab_tokens(cls) -> List[str]:
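        """Return the base token list: special tokens, every color+piece+from-square
        combination, all 64 destination squares, and the four promotion letters."""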
special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
pieces = ["P", "N", "B", "R", "Q", "K"]
colors = ["W", "B"]
files = "abcdefgh"
ranks = "12345678"
squares = [f"{f}{r}" for f in files for r in ranks]
piece_from_tokens = [f"{c}{p}{sq}" for c in colors for p in pieces for sq in squares]
to_tokens = squares
promo_tokens = ["q", "r", "b", "n"]
return special_tokens + piece_from_tokens + to_tokens + promo_tokens
@classmethod
def _parse_move(cls, token: str) -> Optional[Tuple[str, str, str, str, Optional[str]]]:
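        """Parse one move token into (color, piece, from_sq, to_sq, promo).

        Returns None if the token does not match the extended UCI pattern.
        promo is an upper-case promotion piece letter, or None.
        """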
match = cls._MOVE_RE.match(token)
if not match:
return None
color = match.group("color")
piece = match.group("piece")
from_sq = match.group("from")
to_sq = match.group("to")
rest = match.group("rest")
promo_match = cls._PROMO_RE.search(rest)
promo = promo_match.group(1).upper() if promo_match else None
return color, piece, from_sq, to_sq, promo
@classmethod
def build_vocab_from_iterator(
cls,
iterator,
min_frequency: int = 1,
max_full_move_tokens: Optional[int] = 1200,
) -> "ChessTokenizer":
"""
Build a tokenizer vocabulary from an iterator of game strings.
Args:
iterator: An iterator yielding game strings (space-separated moves).
min_frequency: Minimum frequency for a token to be included.
max_full_move_tokens: Maximum number of full-move tokens to keep.
Returns:
A ChessTokenizer with the built vocabulary.
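        Example (illustrative; the resulting vocabulary depends on the games):
            >>> games = ["WPe2e4 BPe7e5 WNg1f3", "WPe2e4 BPc7c5"]
            >>> tok = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=2)
            >>> "WPe2e4" in tok.get_vocab()
            True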
"""
from collections import Counter
token_counts = Counter()
for game in iterator:
moves = game.strip().split()
for move in moves:
parsed = cls._parse_move(move)
if not parsed:
continue
color, piece, from_sq, to_sq, promo = parsed
if promo:
continue
token_counts[f"{color}{piece}{from_sq}{to_sq}"] += 1
# Filter by frequency
tokens = [
token for token, count in token_counts.items()
if count >= min_frequency
]
# Sort by frequency, then lexicographically for reproducibility
tokens.sort(key=lambda t: (-token_counts[t], t))
if max_full_move_tokens is not None:
tokens = tokens[:max_full_move_tokens]
base_tokens = cls._create_base_vocab_tokens()
vocab = {token: idx for idx, token in enumerate(base_tokens + tokens)}
return cls(vocab=vocab)
@classmethod
def build_vocab_from_dataset(
cls,
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
min_frequency: int = 500,
max_samples: Optional[int] = 100000,
max_full_move_tokens: Optional[int] = 1200,
) -> "ChessTokenizer":
"""
Build a tokenizer vocabulary from a Hugging Face dataset.
Args:
dataset_name: Name of the dataset on Hugging Face Hub.
split: Dataset split to use.
column: Column containing the game strings.
min_frequency: Minimum frequency for a token to be included (default: 500).
max_samples: Maximum number of samples to process (default: 100k).
max_full_move_tokens: Maximum number of full-move tokens to keep.
Returns:
A ChessTokenizer with the built vocabulary.
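        Example (illustrative; downloads the dataset from the Hugging Face Hub,
        and the output directory name is arbitrary):
            >>> tok = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000)  # doctest: +SKIP
            >>> tok.save_pretrained("chess_tokenizer")  # doctest: +SKIP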
"""
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
def game_iterator():
for example in dataset:
yield example[column]
return cls.build_vocab_from_iterator(
game_iterator(),
min_frequency=min_frequency,
max_full_move_tokens=max_full_move_tokens,
)
@property
def vocab_size(self) -> int:
"""Return the size of the vocabulary."""
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
"""Return the vocabulary as a dictionary."""
return dict(self._vocab)
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize a string of moves into a list of tokens.
Args:
text: A string of space-separated moves.
Returns:
List of move tokens.
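        Example (with the default vocabulary, which has no full-move tokens;
        unparseable moves map to [UNK]):
            >>> ChessTokenizer()._tokenize("WPe2e4 BPe7e5 junk")
            ['WPe2', 'e4', 'BPe7', 'e5', '[UNK]']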
"""
raw = text.strip()
if not raw:
return []
parts = raw.split()
out: List[str] = []
special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
for part in parts:
if part in special:
out.append(part)
continue
parsed = self._parse_move(part)
if not parsed:
out.append(self.UNK_TOKEN)
continue
color, piece, from_sq, to_sq, promo = parsed
full_move = f"{color}{piece}{from_sq}{to_sq}"
if promo is None and full_move in self._vocab:
out.append(full_move)
continue
            piece_from = f"{color}{piece}{from_sq}"
            to_token = to_sq
out.append(piece_from if piece_from in self._vocab else self.UNK_TOKEN)
out.append(to_token if to_token in self._vocab else self.UNK_TOKEN)
if promo:
promo_token = promo.lower()
out.append(promo_token if promo_token in self._vocab else self.UNK_TOKEN)
return out
def _convert_token_to_id(self, token: str) -> int:
"""Convert a token to its ID."""
return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
def _convert_id_to_token(self, index: int) -> str:
"""Convert an ID to its token."""
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Convert a list of tokens back to a string."""
# Filter out special tokens for cleaner output
special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
return " ".join(t for t in tokens if t not in special)
def save_vocabulary(
self,
save_directory: str,
filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
"""
Save the vocabulary to a JSON file.
Args:
save_directory: Directory to save the vocabulary.
filename_prefix: Optional prefix for the filename.
Returns:
Tuple containing the path to the saved vocabulary file.
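        Example (illustrative; the target directory is arbitrary):
            >>> tok = ChessTokenizer()
            >>> tok.save_vocabulary("chess_tok")  # doctest: +SKIP
            ('chess_tok/vocab.json',)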
"""
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
def count_vocab_from_dataset(
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
"""
Count normalized move frequencies in a dataset (useful for vocabulary analysis).
Args:
dataset_name: Name of the dataset on Hugging Face Hub.
split: Dataset split to use.
column: Column containing the game strings.
max_samples: Maximum number of samples to process.
Returns:
Dictionary mapping normalized full-move tokens to their frequencies.
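    Example (illustrative; downloads the dataset from the Hugging Face Hub):
        >>> counts = count_vocab_from_dataset(max_samples=1000)  # doctest: +SKIP
        >>> top_moves = sorted(counts, key=counts.get, reverse=True)[:10]  # doctest: +SKIP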
"""
from collections import Counter
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
token_counts = Counter()
for example in dataset:
moves = example[column].strip().split()
for move in moves:
parsed = ChessTokenizer._parse_move(move)
if not parsed:
continue
color, piece, from_sq, to_sq, promo = parsed
if promo:
continue
token_counts[f"{color}{piece}{from_sq}{to_sq}"] += 1
return dict(token_counts)
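

if __name__ == "__main__":
    # Minimal smoke test (illustrative). With the compact default vocabulary
    # every move falls back to piece+from and to-square tokens.
    tok = ChessTokenizer()
    print(tok.vocab_size)  # 840 for the default vocabulary
    tokens = tok.tokenize("WPe2e4 BPe7e5 WNg1f3 BNb8c6")
    print(tokens)  # expected: ['WPe2', 'e4', 'BPe7', 'e5', 'WNg1', 'f3', 'BNb8', 'c6']
    ids = tok.convert_tokens_to_ids(tokens)
    print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))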