"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer treats each move as a single token using the extended UCI notation
from the Lichess dataset (e.g., WPe2e4, BNg8f6).

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""

from __future__ import annotations

import json
import os
from typing import Dict, Iterable, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    This tokenizer maps each possible chess move to a unique token ID.
    The vocabulary is built from the training dataset to ensure all moves
    encountered during training have a corresponding token.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [1, 42, 87, 2]  # [BOS, WPe2e4, BPe7e5, EOS]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        model_max_length: Optional[int] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            model_max_length: The maximum sequence length for the tokenizer.
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self.pad_token = self.PAD_TOKEN
        self.bos_token = self.BOS_TOKEN
        self.eos_token = self.EOS_TOKEN
        self.unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens so they do not clash with
        # the fixed ones passed to the parent constructor below.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        if vocab is not None:
            self.vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self.vocab = json.load(f)
        else:
            # Fall back to a minimal vocabulary containing only the special
            # tokens; build the real one with `build_vocab_from_dataset()`.
            self.vocab = self._create_default_vocab()

        # Reverse mapping used for decoding IDs back to move tokens.
        self.ids_to_tokens = {v: k for k, v in self.vocab.items()}

        # The parent constructor must run after `self.vocab` is in place,
        # since it resolves the special tokens against the vocabulary.
        super().__init__(
            pad_token=self.pad_token,
            bos_token=self.bos_token,
            eos_token=self.eos_token,
            unk_token=self.unk_token,
            model_max_length=model_max_length,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just the special tokens.

        This minimal vocab is only a placeholder; for the full vocabulary,
        build from data with `build_vocab_from_dataset()`.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator: Iterable[str],
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        # Count every move token across all games.
        token_counts = Counter()
        for game in iterator:
            moves = game.strip().split()
            token_counts.update(moves)

        # Keep only tokens that meet the frequency threshold.
        tokens = [
            token for token, count in token_counts.items()
            if count >= min_frequency
        ]

        # Sort for a deterministic token-to-ID assignment.
        tokens = sorted(tokens)

        # Special tokens always occupy the first IDs.
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on the Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
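
    # Typical usage (a sketch; downloading the default dataset requires
    # network access to the Hugging Face Hub):
    #   tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=50_000)
    #   tokenizer.save_pretrained("chess_tokenizer")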

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self.vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self.vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of move tokens.
        """
        return text.strip().split()

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID, falling back to [UNK] for unknown moves."""
        return self.vocab.get(token, self.vocab.get(self.unk_token, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a space-separated move string."""
        special = {self.pad_token, self.bos_token, self.eos_token, self.unk_token}
        return " ".join(t for t in tokens if t not in special)
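
    # NOTE: the stock `PreTrainedTokenizer.build_inputs_with_special_tokens`
    # concatenates ids without adding any special tokens, so `encode()` alone
    # would not produce the [BOS] ... [EOS] framing shown in the class
    # docstring. A minimal override (a sketch; adjust if your training setup
    # frames sequences differently):
    def build_inputs_with_special_tokens(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
    ) -> List[int]:
        """Wrap one (or two) sequences in [BOS]/[EOS]."""
        bos, eos = [self.bos_token_id], [self.eos_token_id]
        if token_ids_1 is None:
            return bos + token_ids_0 + eos
        return bos + token_ids_0 + eos + bos + token_ids_1 + eos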

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self.vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on the Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping tokens to their frequencies.
    """
    from collections import Counter

    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)

    return dict(token_counts)
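

# A minimal smoke test (a sketch): builds a tiny in-memory vocabulary so no
# network access is needed, then checks the encode/decode round trip. The
# example games are hypothetical but follow the dataset's move notation.
if __name__ == "__main__":
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPd2d4 BPd7d5 WPc2c4 BPe7e6",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(games)
    ids = tokenizer.encode("WPe2e4 BPe7e5")
    print(ids)  # e.g. [1, ..., 2]: move ids wrapped in [BOS]/[EOS]
    print(tokenizer.decode(ids, skip_special_tokens=True))  # "WPe2e4 BPe7e5"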