# chess-normal-BPE/tokenizer.py
"""
Custom Chess Tokenizer for the Chess Challenge.
This tokenizer treats each move as a single token using the extended UCI notation
from the Lichess dataset (e.g., WPe2e4, BNg8f6).
The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
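Example opening in this notation (illustrative): "WPe2e4 BPd7d5 WPe4d5(x) BQd8d5(x)"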
"""
from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
"""
A custom tokenizer for chess moves using extended UCI notation.
This tokenizer maps each possible chess move to a unique token ID.
The vocabulary is built from the training dataset to ensure all moves
encountered during training have a corresponding token.
Example:
>>> tokenizer = ChessTokenizer()
>>> tokenizer.encode("WPe2e4 BPe7e5")
        [1, 42, 87, 2]  # [BOS, WPe2e4, BPe7e5, EOS] (IDs depend on the built vocab)
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
"""
Initialize the chess tokenizer.
Args:
vocab_file: Path to a JSON file containing the vocabulary mapping.
vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
**kwargs: Additional arguments passed to PreTrainedTokenizer.
"""
# Initialize special tokens
self._pad_token = self.PAD_TOKEN
self._bos_token = self.BOS_TOKEN
self._eos_token = self.EOS_TOKEN
self._unk_token = self.UNK_TOKEN
# Remove any duplicate special-token entries passed through kwargs
# to avoid "multiple values for keyword" errors when loading from disk.
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
# Load or create vocabulary
if vocab is not None:
self._vocab = vocab
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
# Create a minimal vocabulary with just special tokens
# The full vocabulary should be built from the dataset
self._vocab = self._create_default_vocab()
# Create reverse mapping
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
# Call parent init AFTER setting up vocab
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
def _create_default_vocab(self) -> Dict[str, int]:
"""
Create a minimal default vocabulary with just special tokens.
        For the full vocabulary, use `build_vocab_from_dataset()`; this minimal
        vocab is only a placeholder.
"""
special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
vocab = {token: idx for idx, token in enumerate(special_tokens)}
return vocab
@classmethod
def build_vocab_from_iterator(
cls,
iterator,
min_frequency: int = 1,
) -> "ChessTokenizer":
"""
Build a tokenizer vocabulary from an iterator of game strings.
Args:
iterator: An iterator yielding game strings (space-separated moves).
min_frequency: Minimum frequency for a token to be included.
Returns:
A ChessTokenizer with the built vocabulary.
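        Example:
            >>> games = ["WPe2e4 BPe7e5", "WPe2e4 BPc7c5"]
            >>> tok = ChessTokenizer.build_vocab_from_iterator(games)
            >>> tok.vocab_size  # 4 special tokens + 3 distinct moves
            7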
"""
from collections import Counter
token_counts = Counter()
for game in iterator:
moves = game.strip().split()
token_counts.update(moves)
# Filter by frequency
tokens = [
token for token, count in token_counts.items()
if count >= min_frequency
]
# Sort for reproducibility
tokens = sorted(tokens)
# Build vocabulary
special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
return cls(vocab=vocab)
@classmethod
def build_vocab_from_dataset(
cls,
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
min_frequency: int = 500,
max_samples: Optional[int] = 100000,
) -> "ChessTokenizer":
"""
Build a tokenizer vocabulary from a Hugging Face dataset.
Args:
dataset_name: Name of the dataset on Hugging Face Hub.
split: Dataset split to use.
column: Column containing the game strings.
min_frequency: Minimum frequency for a token to be included (default: 500).
max_samples: Maximum number of samples to process (default: 100k).
Returns:
A ChessTokenizer with the built vocabulary.
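        Example (requires network access; the save path is illustrative):
            >>> tok = ChessTokenizer.build_vocab_from_dataset(max_samples=1000)  # doctest: +SKIP
            >>> tok.save_pretrained("./chess_tokenizer")  # doctest: +SKIP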
"""
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
def game_iterator():
for example in dataset:
yield example[column]
return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
@property
def vocab_size(self) -> int:
"""Return the size of the vocabulary."""
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
"""Return the vocabulary as a dictionary."""
return dict(self._vocab)
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize a string of moves into a list of tokens.
Args:
text: A string of space-separated moves.
Returns:
List of move tokens.
"""
return text.strip().split()
def _convert_token_to_id(self, token: str) -> int:
"""Convert a token to its ID."""
return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
def _convert_id_to_token(self, index: int) -> str:
"""Convert an ID to its token."""
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Convert a list of tokens back to a string."""
# Filter out special tokens for cleaner output
special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
return " ".join(t for t in tokens if t not in special)
def save_vocabulary(
self,
save_directory: str,
filename_prefix: Optional[str] = None,
) -> tuple:
"""
Save the vocabulary to a JSON file.
Args:
save_directory: Directory to save the vocabulary.
filename_prefix: Optional prefix for the filename.
Returns:
Tuple containing the path to the saved vocabulary file.
"""
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
def count_vocab_from_dataset(
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
"""
Count token frequencies in a dataset (useful for vocabulary analysis).
Args:
dataset_name: Name of the dataset on Hugging Face Hub.
split: Dataset split to use.
column: Column containing the game strings.
max_samples: Maximum number of samples to process.
Returns:
Dictionary mapping tokens to their frequencies.
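    Example (illustrative; requires network access):
        >>> counts = count_vocab_from_dataset(max_samples=10000)  # doctest: +SKIP
        >>> rare = sum(1 for c in counts.values() if c < 500)  # doctest: +SKIP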
"""
from collections import Counter
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
token_counts = Counter()
for example in dataset:
moves = example[column].strip().split()
token_counts.update(moves)
return dict(token_counts)
class CoordinateTokenizer(ChessTokenizer):
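    """
    Square-level variant of ChessTokenizer: each move is split into its
    from-square, to-square, and (if present) a promotion letter, giving a
    fixed 72-token vocabulary (64 squares + 4 promotions + 4 special tokens).
    """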
def __init__(self, **kwargs):
squares = [f"{f}{r}" for f in "abcdefgh" for r in "12345678"]
promotions = ["q", "r", "b", "n"]
control = ["[PAD]", "[BOS]", "[EOS]", "[UNK]"]
vocab_list = control + squares + promotions
        self._vocab = {t: i for i, t in enumerate(vocab_list)}
        self._ids_to_tokens = {i: t for t, i in self._vocab.items()}
super().__init__(
vocab=self._vocab,
pad_token="[PAD]",
bos_token="[BOS]",
eos_token="[EOS]",
unk_token="[UNK]",
truncation_side="left",
**kwargs
)
    def _tokenize(self, text: str) -> List[str]:
        raw_moves = text.strip().split()
        tokens = []
        for raw_move in raw_moves:
            squares = re.findall(r'[a-h][1-8]', raw_move)
            tokens.extend(squares)
            if "=" in raw_move:
                # SAN-style promotion, e.g. "a8=Q" -> "q"
                idx = raw_move.index("=")
                if idx + 1 < len(raw_move):
                    tokens.append(raw_move[idx + 1].lower())
            else:
                # UCI-style promotion: a piece letter right after a destination
                # square on the first or last rank, e.g. "WPa7a8q" -> "q".
                # (Checking only the last character would miss suffixed moves
                # like "WPa7a8q(x)" and non-queen promotions.)
                match = re.search(r'[a-h][1-8][a-h][18]([qrbn])', raw_move)
                if match:
                    tokens.append(match.group(1))
        return tokens
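# Example of CoordinateTokenizer's square-level split (illustrative):
#   CoordinateTokenizer()._tokenize("WPe2e4 WPa7a8q")  ->  ['e2', 'e4', 'a7', 'a8', 'q']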
class CoordinateChessTokenizer(PreTrainedTokenizer):
"""
Tokenizer that decomposes chess moves into coordinate components.
Example:
WPe2e4 -> ['e2', 'e4']
WPa7a8q -> ['a7', 'a8', 'q'] # pawn promotion
Vocabulary size: 72 tokens
- 64 squares (a1-h8)
- 4 promotions (q, r, b, n)
- 4 special tokens
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
# Regex to extract from-square, to-square, and optional promotion
MOVE_PATTERN = re.compile(r'([a-h][1-8])([a-h][1-8])([qrbn])?')
def __init__(self, vocab_file: Optional[str] = None, **kwargs):
# Remove duplicate special token kwargs
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
# Build fixed vocabulary
if vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
self._vocab = self._create_vocab()
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
super().__init__(
pad_token=self.PAD_TOKEN,
bos_token=self.BOS_TOKEN,
eos_token=self.EOS_TOKEN,
unk_token=self.UNK_TOKEN,
**kwargs,
)
def _create_vocab(self) -> Dict[str, int]:
"""Create fixed vocabulary of 72 tokens."""
tokens = [
self.PAD_TOKEN,
self.BOS_TOKEN,
self.EOS_TOKEN,
self.UNK_TOKEN,
]
# Add all 64 squares
for file in 'abcdefgh':
for rank in '12345678':
tokens.append(f"{file}{rank}")
# Add promotion pieces
tokens.extend(['q', 'r', 'b', 'n'])
return {token: idx for idx, token in enumerate(tokens)}
@property
def vocab_size(self) -> int:
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
return dict(self._vocab)
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize move string into coordinate components.
Args:
text: Space-separated moves like "WPe2e4 BNg8f6"
Returns:
List of coordinate tokens: ['e2', 'e4', 'g8', 'f6']
"""
tokens = []
raw_moves = text.strip().split()
for move in raw_moves:
match = self.MOVE_PATTERN.search(move)
if match:
from_sq, to_sq, promotion = match.groups()
tokens.append(from_sq)
tokens.append(to_sq)
if promotion:
tokens.append(promotion)
return tokens
def _convert_token_to_id(self, token: str) -> int:
return self._vocab.get(token, self._vocab[self.UNK_TOKEN])
def _convert_id_to_token(self, index: int) -> str:
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Reconstruct moves from coordinate tokens."""
special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
clean = [t for t in tokens if t not in special]
# Group into moves (2 or 3 tokens per move)
moves = []
i = 0
while i < len(clean):
if i + 1 < len(clean):
move = clean[i] + clean[i + 1]
i += 2
# Check for promotion
if i < len(clean) and clean[i] in ['q', 'r', 'b', 'n']:
move += clean[i]
i += 1
moves.append(move)
else:
i += 1
return " ".join(moves)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
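# Round-trip sketch for CoordinateChessTokenizer (illustrative):
#   tok = CoordinateChessTokenizer()
#   tok._tokenize("WPe2e4 WPa7a8q")                              ->  ['e2', 'e4', 'a7', 'a8', 'q']
#   tok.convert_tokens_to_string(['e2', 'e4', 'a7', 'a8', 'q'])  ->  'e2e4 a7a8q'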
class EnhancedCoordinateTokenizer(CoordinateChessTokenizer):
"""
Extended version that preserves piece information as optional metadata.
Vocabulary: 76 tokens (adds W, B, P, N, B, R, Q, K but makes them optional)
Use this if you want to preserve color/piece info with minimal vocab growth.
"""
def _create_vocab(self) -> Dict[str, int]:
vocab = super()._create_vocab()
# Add optional color and piece tokens
piece_tokens = ['W', 'B', 'P', 'N', 'R', 'Q', 'K'] # Note: B appears in both contexts
next_id = len(vocab)
for token in piece_tokens:
if token not in vocab:
vocab[token] = next_id
next_id += 1
return vocab
def _tokenize(self, text: str) -> List[str]:
"""
Optionally include piece info: WPe2e4 -> ['W', 'P', 'e2', 'e4']
Or strip it for minimal version: WPe2e4 -> ['e2', 'e4']
"""
tokens = []
raw_moves = text.strip().split()
for move in raw_moves:
# Extract color and piece if present
if len(move) >= 2 and move[0] in 'WB' and move[1] in 'PNBRQK':
# Uncomment to include piece info (increases sequence length):
# tokens.extend([move[0], move[1]])
pass
# Extract coordinates
match = self.MOVE_PATTERN.search(move)
if match:
from_sq, to_sq, promotion = match.groups()
tokens.append(from_sq)
tokens.append(to_sq)
if promotion:
tokens.append(promotion)
return tokens
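# The enhanced vocabulary is 79 tokens: the 72 coordinate tokens plus the seven
# distinct letters W, B, P, N, R, Q, K ('B' covers both Black and Bishop):
#   EnhancedCoordinateTokenizer().vocab_size  ->  79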
class SanitizedChessTokenizer(ChessTokenizer):
    """
    ChessTokenizer variant that strips annotations before tokenizing.
    Rather than stripping suffixes ((, ), x, +, *, o, O, E) and prefixes
    (W or B followed by P, N, B, R, Q, K) one by one, a single regex extracts
    the pure move: from-square, to-square, and optional promotion (q, r, b, n),
    e.g. "WPe2e4(x)" -> "e2e4". This is safer than stripping because it
    ignores all noise around the move.
    """
    # Regex matching the pure UCI move inside an annotated token.
    MOVE_PATTERN = re.compile(r'([a-h][1-8][a-h][1-8][qrbn]?)')
def _sanitize(self, text: str) -> str:
# Extract just the move part (e.g., "WPe2e4(x)" -> "e2e4")
match = self.MOVE_PATTERN.search(text)
if match:
return match.group(1)
return self.unk_token # Fallback if no valid move found
def _tokenize(self, text: str) -> List[str]:
# Tokenize by splitting space, then extracting the move
tokens = []
for t in text.strip().split():
clean = self._sanitize(t)
if clean != self.unk_token:
tokens.append(clean)
return tokens
@classmethod
def build_vocab_from_iterator(cls, iterator, min_frequency: int = 1) -> "SanitizedChessTokenizer":
from collections import Counter
token_counts = Counter()
for game in iterator:
moves = game.strip().split()
# Extract only the Pure UCI part
clean_moves = []
for m in moves:
match = cls.MOVE_PATTERN.search(m)
if match:
clean_moves.append(match.group(1))
token_counts.update(clean_moves)
# Filter by frequency
tokens = [
token for token, count in token_counts.items()
if count >= min_frequency
]
tokens = sorted(tokens)
# Build vocabulary
special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
return cls(vocab=vocab)
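if __name__ == "__main__":
    # Minimal offline smoke test (a sketch, not part of the challenge harness);
    # the toy games are made up in the dataset's notation, not taken from Lichess.
    games = [
        "WPe2e4 BPd7d5 WPe4d5(x) BQd8d5(x)",
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
    ]
    tok = SanitizedChessTokenizer.build_vocab_from_iterator(games)
    print(tok._tokenize("WPe2e4(x) BNg8f6"))  # ['e2e4', 'g8f6']
    ids = tok.convert_tokens_to_ids(tok._tokenize("WPe2e4 BPe7e5"))
    print(ids, "->", tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))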