# chess_willy_v3/tokenizer.py
"""
Custom Chess Tokenizer for the Chess Challenge.
This tokenizer decomposes each move of the extended UCI notation used by the
Lichess dataset (e.g., WPe2e4, BNg8f6) into a small set of structured tokens:
color, piece, source square, destination square, and optional suffixes.
The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling, (Q)/(R)/(B)/(N)=promotion
"""
from __future__ import annotations
import json
import os
from typing import Dict, List, Optional
from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
"""
A custom tokenizer for chess moves using extended UCI notation.
This tokenizer maps each possible chess move to a unique token ID.
The vocabulary is built from the training dataset to ensure all moves
encountered during training have a corresponding token.
Example:
>>> tokenizer = ChessTokenizer()
>>> tokenizer.encode("WPe2e4 BPe7e5")
[1, 42, 87, 2] # [BOS, e2e4, e7e5, EOS]
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
"""
Initialize the chess tokenizer.
Args:
vocab_file: Path to a JSON file containing the vocabulary mapping.
vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
**kwargs: Additional arguments passed to PreTrainedTokenizer.
"""
# Initialize special tokens
self._pad_token = self.PAD_TOKEN
self._bos_token = self.BOS_TOKEN
self._eos_token = self.EOS_TOKEN
self._unk_token = self.UNK_TOKEN
# Remove any duplicate special-token entries passed through kwargs
# to avoid "multiple values for keyword" errors when loading from disk.
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
# Load or create vocabulary
if vocab is not None:
self._vocab = vocab
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
# Create a minimal vocabulary with just special tokens
# The full vocabulary should be built from the dataset
self._vocab = self._create_default_vocab()
# Create reverse mapping
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
# Call parent init AFTER setting up vocab
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
def _create_default_vocab(self) -> Dict[str, int]:
"""
Create a fixed structured vocabulary (no dataset-dependent move tokens).
Tokens:
- Special: [PAD], [BOS], [EOS], [UNK]
- Color: [W], [B]
- Pieces: [P], [N], [BISHOP], [R], [Q], [K]
- Squares: [a1]..[h8]
- Suffixes: [x], [+], [#]
- Castling: [O-O], [O-O-O]
- Promotions: [prom_Q], [prom_R], [prom_B], [prom_N]
- Move separator: [MOVE_END]
"""
special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
colors = ["[W]", "[B]"]
pieces = ["[P]", "[N]", "[BISHOP]", "[R]", "[Q]", "[K]"]
files = "abcdefgh"
ranks = "12345678"
squares = [f"[{f}{r}]" for r in ranks for f in files] # a1..h8
suffixes = ["[x]", "[+]", "[#]"]
castling = ["[O-O]", "[O-O-O]"]
promotions = ["[prom_Q]", "[prom_R]", "[prom_B]", "[prom_N]"]
move_end = ["[MOVE_END]"]
tokens = special + colors + pieces + squares + suffixes + castling + promotions + move_end
return {tok: i for i, tok in enumerate(tokens)}
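# The fixed vocabulary has 4 special + 2 color + 6 piece + 64 square
# + 3 suffix + 2 castling + 4 promotion + 1 separator = 86 tokens.
# "[BISHOP]" is spelled out so the bishop piece does not collide with the
# color token "[B]".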
@classmethod
def build_vocab_from_iterator(cls, iterator, min_frequency: int = 1) -> "ChessTokenizer":
# The structured tokenizer uses a fixed vocabulary; the iterator is unused.
return cls()
# @classmethod
# def build_vocab_from_dataset(
# cls,
# dataset_name: str = "dlouapre/lichess_2025-01_1M",
# split: str = "train",
# column: str = "text",
# min_frequency: int = 500,
# max_samples: Optional[int] = 100000,
# ) -> "ChessTokenizer":
# """
# Build a tokenizer vocabulary from a Hugging Face dataset.
# Args:
# dataset_name: Name of the dataset on Hugging Face Hub.
# split: Dataset split to use.
# column: Column containing the game strings.
# min_frequency: Minimum frequency for a token to be included (default: 500).
# max_samples: Maximum number of samples to process (default: 100k).
# Returns:
# A ChessTokenizer with the built vocabulary.
# """
# from datasets import load_dataset
# dataset = load_dataset(dataset_name, split=split)
# if max_samples is not None:
# dataset = dataset.select(range(min(max_samples, len(dataset))))
# def game_iterator():
# for example in dataset:
# yield example[column]
# return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
@classmethod
def build_vocab_from_dataset(
    cls,
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    min_frequency: int = 500,
    max_samples: Optional[int] = 100000,
) -> "ChessTokenizer":
# The structured tokenizer uses a fixed vocabulary; the dataset parameters are unused.
return cls()
@property
def vocab_size(self) -> int:
"""Return the size of the vocabulary."""
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
"""Return the vocabulary as a dictionary."""
return dict(self._vocab)
def _move_to_tokens(self, move: str) -> List[str]:
"""
Convert one extended-UCI move string to structured tokens.
Examples:
"WPe2e4" -> ["[W]","[P]","[e2]","[e4]"]
"WBb5c6(x+)" -> ["[W]","[BISHOP]","[b5]","[c6]","[x]","[+]"]
"BKe8g8(o)" -> ["[B]","[O-O]"]
"WPa7a8(Q)" -> ["[W]","[P]","[a7]","[a8]","[prom_Q]"]
"""
toks: List[str] = []
if not move:
return [self.UNK_TOKEN]
# Color
color = move[0]
toks.append("[W]" if color == "W" else "[B]")
# Basic fields
# move[1] is piece letter in dataset (P,N,B,R,Q,K)
piece_char = move[1] if len(move) > 1 else ""
piece_map = {"P": "[P]", "N": "[N]", "B": "[BISHOP]", "R": "[R]", "Q": "[Q]", "K": "[K]"}
toks.append(piece_map.get(piece_char, self.UNK_TOKEN))
# Source and destination squares assumed at positions 2:4 and 4:6
# e.g. WPe2e4 -> from=e2 to=e4
if len(move) >= 6:
from_sq = move[2:4]
to_sq = move[4:6]
toks.append(f"[{from_sq}]")
toks.append(f"[{to_sq}]")
else:
# malformed
toks.append(self.UNK_TOKEN)
toks.append(self.UNK_TOKEN)
# --- Castling ---
# Dataset mentions (o)/(O)=castling, sometimes attached to king moves.
# We'll map based on king destination:
if "(o)" in move or "(O)" in move:
# King ends on g-file => O-O ; on c-file => O-O-O
if len(move) >= 6:
to_sq = move[4:6]
if to_sq[0] == "g":
return [toks[0], "[O-O]"]
if to_sq[0] == "c":
return [toks[0], "[O-O-O]"]
# --- Promotion ---
if "(Q)" in move:
toks.append("[prom_Q]")
elif "(R)" in move:
toks.append("[prom_R]")
elif "(B)" in move:
toks.append("[prom_B]")
elif "(N)" in move:
toks.append("[prom_N]")
# --- Capture / check / mate ---
# Capture patterns: "(x)", "(x+)", "(x+*)", etc.
if "(x" in move:
    toks.append("[x]")
# Checkmate is marked with "+*" (e.g. "(+*)" or "(x+*)"); a plain check with "+".
if "+*" in move:
    toks.append("[#]")
elif "+" in move:
    toks.append("[+]")
return toks
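# Illustrative suffix handling (hypothetical move strings, following the
# suffix rules above):
#   "WRe1e8(x+)"  -> ["[W]", "[R]", "[e1]", "[e8]", "[x]", "[+]"]
#   "WQh5f7(x+*)" -> ["[W]", "[Q]", "[h5]", "[f7]", "[x]", "[#]"]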
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize a game string into structured tokens.
Each move becomes:
[W]/[B], [PIECE], [from], [to], optional flags, then [MOVE_END]
"""
moves = text.strip().split()
out: List[str] = []
for mv in moves:
out.extend(self._move_to_tokens(mv))
out.append("[MOVE_END]")
return out
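# e.g. "WPe2e4 BNg8f6" ->
#   ["[W]", "[P]", "[e2]", "[e4]", "[MOVE_END]",
#    "[B]", "[N]", "[g8]", "[f6]", "[MOVE_END]"]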
def _convert_token_to_id(self, token: str) -> int:
"""Convert a token to its ID."""
return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
def _convert_id_to_token(self, index: int) -> str:
"""Convert an ID to its token."""
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
    """Join structured tokens into a space-separated string.
    Special tokens and [MOVE_END] are dropped, so this does not reconstruct
    the original extended-UCI move strings.
    """
    special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
    return " ".join(t for t in tokens if t not in special and t != "[MOVE_END]")
def save_vocabulary(
self,
save_directory: str,
filename_prefix: Optional[str] = None,
) -> tuple:
"""
Save the vocabulary to a JSON file.
Args:
save_directory: Directory to save the vocabulary.
filename_prefix: Optional prefix for the filename.
Returns:
Tuple containing the path to the saved vocabulary file.
"""
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
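# Saving/loading sketch (the directory name is illustrative):
# save_pretrained() calls save_vocabulary() above, and from_pretrained()
# passes the resolved vocab.json path back in as `vocab_file`.
#   tok = ChessTokenizer()
#   tok.save_pretrained("chess_tokenizer")
#   tok = ChessTokenizer.from_pretrained("chess_tokenizer")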
def count_vocab_from_dataset(
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
"""
Count token frequencies in a dataset (useful for vocabulary analysis).
Args:
dataset_name: Name of the dataset on Hugging Face Hub.
split: Dataset split to use.
column: Column containing the game strings.
max_samples: Maximum number of samples to process.
Returns:
Dictionary mapping tokens to their frequencies.
"""
from collections import Counter
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
token_counts = Counter()
for example in dataset:
moves = example[column].strip().split()
token_counts.update(moves)
return dict(token_counts)
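if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the training pipeline):
    # build the fixed-vocabulary tokenizer and inspect a short opening.
    tokenizer = ChessTokenizer()
    print(f"vocab size: {tokenizer.vocab_size}")
    game = "WPe2e4 BPe7e5 WNg1f3"
    tokens = tokenizer.tokenize(game)
    ids = tokenizer.encode(game, add_special_tokens=False)
    print(tokens)
    print(ids)
    # decode() goes through convert_tokens_to_string, so [MOVE_END] is dropped
    # and the original move strings are not reconstructed.
    print(tokenizer.decode(ids))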