"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer splits each move in the extended UCI notation used by the
Lichess dataset (e.g., WPe2e4, BNg8f6) into a few sub-tokens: piece,
source square, destination square, and an optional suffix.

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""
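
# Illustration (consistent with _tokenize below): the move string
# "WPe2e4 BNg8f6x" tokenizes to ["WP", "e2", "e4", "BN", "g8", "f6", "x"].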

from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    vocab_files_names = {"vocab_file": "vocab.json"}
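
    # vocab_files_names tells from_pretrained() which file save_vocabulary()
    # writes, so a saved tokenizer directory can be reloaded by name.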

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens; this tokenizer fixes its own.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        self.vocab_file = vocab_file

        # The vocab must exist before calling super().__init__(), which may query it.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """Creates basic vocab. Use build_vocab_from_dataset for full vocab."""
        special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {t: i for i, t in enumerate(special)}
        return vocab

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Splits text "WPe2e4 BNg8f6" into ["WP", "e2", "e4", "BN", "g8", "f6"].
        A trailing suffix (e.g., the "x" in "BNg8f6x") becomes its own token.
        """
        tokens = []
        raw_moves = text.strip().split()
        for move in raw_moves:
            if len(move) >= 6:
                tokens.append(move[:2])   # colour + piece, e.g. "WP"
                tokens.append(move[2:4])  # source square, e.g. "e2"
                tokens.append(move[4:6])  # destination square, e.g. "e4"
                if len(move) > 6:
                    tokens.append(move[6:])  # suffix: x, +, +*, o, O
            else:
                tokens.append(self.UNK_TOKEN)
        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Reconstructs the move string.

        Note: this simply joins the tokens with spaces. If you need to
        strictly recreate 'WPe2e4' from ['WP', 'e2', 'e4'], see the
        _regroup_moves sketch below.
        """
        return " ".join(t for t in tokens if t not in [
            self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN
        ])
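
    # A sketch of the stricter reconstruction mentioned above. The helper name
    # (_regroup_moves) is ours, not part of the transformers API: it glues
    # piece/source/destination(/suffix) tokens back into moves like "WPe2e4".
    def _regroup_moves(self, tokens: List[str]) -> str:
        specials = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        moves: List[str] = []
        current = ""
        for t in tokens:
            if t in specials:
                continue
            if len(t) == 2 and t[0] in "WB":  # a piece token starts a new move
                if current:
                    moves.append(current)
                current = t
            else:
                current += t  # squares and suffixes attach to the open move
        if current:
            moves.append(current)
        return " ".join(moves)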

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json"
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
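
    # Note: save_vocabulary() writes only vocab.json. For a full round trip,
    # tokenizer.save_pretrained(dir) then ChessTokenizer.from_pretrained(dir)
    # should work, since vocab_files_names points from_pretrained at the file.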

    @classmethod
    def build_vocab_from_dataset(cls, dataset_name="dlouapre/lichess_2025-01_1M", split="train", max_samples=10000):
        """Scans the dataset to find all unique pieces, squares, and suffixes."""
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split, streaming=True)

        pieces = set()
        squares = set()
        endings = set()

        print("Building vocabulary...")
        count = 0
        for example in dataset:
            moves = example["text"].split()
            for move in moves:
                if len(move) >= 6:
                    pieces.add(move[:2])
                    squares.add(move[2:4])
                    squares.add(move[4:6])
                    if len(move) > 6:
                        endings.add(move[6:])  # mirror the split in _tokenize
            count += 1
            if count >= max_samples:
                break

        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        all_tokens = special + sorted(pieces) + sorted(endings) + sorted(squares)

        vocab = {token: idx for idx, token in enumerate(all_tokens)}
        return cls(vocab=vocab)
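

# A minimal smoke test (our own example, not part of the challenge code).
# It assumes network access to the dlouapre/lichess_2025-01_1M dataset; the
# tokenizer call itself only needs the vocab built above.
if __name__ == "__main__":
    tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=1000)
    print(f"Vocab size: {tokenizer.vocab_size}")

    encoded = tokenizer("WPe2e4 BNg8f6", add_special_tokens=False)
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))  # "WP e2 e4 BN g8 f6"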