"""
|
|
|
Custom Chess Tokenizer for the Chess Challenge.
|
|
|
|
|
|
This tokenizer treats each move as a single token using the extended UCI notation
|
|
|
from the Lichess dataset (e.g., WPe2e4, BNg8f6).
|
|
|
|
|
|
We normalize moves by stripping special suffixes:
|
|
|
- (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
|
|
|
|
|
|
Example normalization:
|
|
|
WBb5c6(x) -> WBb5c6
|
|
|
BPd7c6(x) -> BPd7c6
|
|
|
... (x)(+) -> ... (both removed)
|
|
|
"""
|
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
import json
|
|
|
import os
|
|
|
from typing import Dict, List, Optional
|
|
|
|
|
|
from transformers import PreTrainedTokenizer
|
|
|
|
|
|
|
|
|
class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    _SPECIAL_SUFFIXES = ("(x)", "(+*)", "(+)", "(o)", "(O)")

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

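        # Ignore any caller-supplied special tokens; this tokenizer always uses the
        # class-level [PAD] / [BOS] / [EOS] / [UNK] constants defined above.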
kwargs.pop("pad_token", None)
|
|
|
kwargs.pop("bos_token", None)
|
|
|
kwargs.pop("eos_token", None)
|
|
|
kwargs.pop("unk_token", None)
|
|
|
|
|
|
|
|
|
if vocab is not None:
|
|
|
self._vocab = vocab
|
|
|
elif vocab_file is not None and os.path.exists(vocab_file):
|
|
|
with open(vocab_file, "r", encoding="utf-8") as f:
|
|
|
self._vocab = json.load(f)
|
|
|
else:
|
|
|
self._vocab = self._create_default_vocab()
|
|
|
|
|
|
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
|
|
|
|
|
|
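        # The vocab and id->token map are built before calling super().__init__(),
        # since recent PreTrainedTokenizer versions may look tokens up while
        # registering the special tokens passed below.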
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {token: idx for idx, token in enumerate(special_tokens)}

    @classmethod
    def _normalize_move(cls, move: str) -> str:
        """
        Strip known special suffixes from the end of a move token.

        Handles stacked suffixes like "...(x)(+)" by stripping repeatedly.
        """
        move = move.strip()

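        # Keep stripping until no suffix matches, so stacked markers such as
        # "(x)(+)" are fully removed.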
        changed = True
        while changed:
            changed = False
            for suf in cls._SPECIAL_SUFFIXES:
                if move.endswith(suf):
                    move = move[: -len(suf)]
                    changed = True
        return move

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            raw_moves = game.strip().split()
            moves = [cls._normalize_move(m) for m in raw_moves if m.strip()]
            token_counts.update(moves)

        tokens = [token for token, count in token_counts.items() if count >= min_frequency]
        tokens = sorted(tokens)

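        # Special tokens come first so they keep stable, low ids regardless of the
        # move vocabulary; moves are sorted for a deterministic mapping across runs.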
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
        return cls(vocab=vocab)

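    # Convenience wrapper: stream games from a Hugging Face dataset and delegate to
    # build_vocab_from_iterator(). Requires the `datasets` package and, typically,
    # network access to download the dataset on first use.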
    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

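    # Games are whitespace-separated move strings, so tokenization is a simple split
    # followed by suffix normalization.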
    def _tokenize(self, text: str) -> List[str]:
        raw_moves = text.strip().split()
        return [self._normalize_move(m) for m in raw_moves if m.strip()]

    def _convert_token_to_id(self, token: str) -> int:
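        # Normalize defensively in case a raw (un-normalized) move is passed in;
        # anything not in the vocab maps to [UNK] (falling back to id 0 as a last resort).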
        token = self._normalize_move(token)
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)


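# Standalone helper: count normalized move frequencies over (a sample of) the dataset,
# e.g. to pick a sensible min_frequency before building a vocabulary.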
def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        raw_moves = example[column].strip().split()
        moves = [ChessTokenizer._normalize_move(m) for m in raw_moves if m.strip()]
        token_counts.update(moves)

    return dict(token_counts)
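

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the training pipeline):
    # build a tiny vocabulary from two hard-coded games in the extended UCI format
    # and round-trip a short move sequence. The games below are made-up examples.
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6 WBf1b5",
        "WPd2d4 BPd7d5 WPc2c4 BPd5c4(x)",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1)
    ids = tokenizer("WPe2e4 BPd5c4(x)")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))  # normalized move tokens
    print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))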