"""
Custom Chess Tokenizer for the Chess Challenge.
This tokenizer decomposes each move written in extended UCI notation
into structured sub-tokens.
The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
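Example: "WPe2e4 BNg8f6(x)" tokenizes to
["W", "P", "e2", "e4", "B", "N", "g8", "f6", "x"].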
"""
from __future__ import annotations
import json
import os
from typing import Dict, List, Optional, Tuple
from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
"""
A custom tokenizer for chess moves using extended UCI notation.
Each move is decomposed into semantic sub-components rather than
treated as a single atomic token.
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
        # PreTrainedTokenizer reads the special tokens and the vocabulary
        # during its own __init__, so both must be in place before super() runs.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        # Ignore any caller-supplied special tokens; the class constants win.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)
        # Vocabulary priority: explicit dict > vocab file on disk > minimal default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
    def _create_default_vocab(self) -> Dict[str, int]:
        """Minimal vocabulary containing only the special tokens, i.e.
        {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2, "[UNK]": 3}; chess sub-tokens
        are added via the build_vocab_* constructors."""
        special_tokens = [
self.PAD_TOKEN,
self.BOS_TOKEN,
self.EOS_TOKEN,
self.UNK_TOKEN,
]
return {tok: i for i, tok in enumerate(special_tokens)}
@classmethod
def build_vocab_from_iterator(
cls,
iterator,
min_frequency: int = 1,
) -> "ChessTokenizer":
from collections import Counter
counter = Counter()
tokenizer = cls()
for game in iterator:
for move in game.strip().split():
counter.update(tokenizer._tokenize(move))
        # Keep tokens that meet the frequency threshold, sorted so that id
        # assignment is deterministic; special tokens always get the lowest ids.
        tokens = sorted(
            token for token, count in counter.items()
            if count >= min_frequency
        )
        special_tokens = [
            cls.PAD_TOKEN,
            cls.BOS_TOKEN,
            cls.EOS_TOKEN,
            cls.UNK_TOKEN,
        ]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
return cls(vocab=vocab)
@classmethod
def build_vocab_from_dataset(
cls,
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
min_frequency: int = 500,
max_samples: Optional[int] = 100000,
) -> "ChessTokenizer":
from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
def game_iterator():
for example in dataset:
yield example[column]
return cls.build_vocab_from_iterator(
game_iterator(),
min_frequency=min_frequency,
)
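    # Usage sketch (illustrative only; requires the `datasets` package,
    # network access, and an output directory of your choice):
    #
    #     tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=50_000)
    #     tokenizer.save_pretrained("./chess-tokenizer")  # writes vocab.json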
@property
def vocab_size(self) -> int:
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
return dict(self._vocab)
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize a string of moves into factorized chess tokens.
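        Example:
            "WQd1h5(x)(+*)" -> ["W", "Q", "d1", "h5", "x", "+", "#"]
        i.e. checkmate "(+*)" emits both the check and the mate token.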
"""
        tokens: List[str] = []
        for move in text.strip().split():
            # Fixed-width core: side, piece, from-square, to-square.
            tokens.append(move[0])    # side: W or B
            tokens.append(move[1])    # piece: P/N/B/R/Q/K
            tokens.append(move[2:4])  # from-square, e.g. "e2"
            tokens.append(move[4:6])  # to-square, e.g. "e4"
            # Scan any trailing suffixes; parentheses and unrecognized
            # characters are skipped by the final else branch.
            suffix = move[6:]
            i = 0
            while i < len(suffix):
                if suffix.startswith("(x)", i):
                    tokens.append("x")  # capture
                    i += 3
                elif suffix[i] == "+":
                    tokens.append("+")  # check (also emitted for checkmate "(+*)")
                    i += 1
                elif suffix[i] == "*":
                    tokens.append("#")  # checkmate, normalized to SAN-style "#"
                    i += 1
                elif suffix[i].lower() == "o":
                    tokens.append("castle")  # (o)/(O) castling marker
                    i += 1
                else:
                    i += 1
        return tokens
def _convert_token_to_id(self, token: str) -> int:
return self._vocab.get(token, self._vocab[self.UNK_TOKEN])
def _convert_id_to_token(self, index: int) -> str:
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-tokens with single spaces, dropping special tokens; note
        that this does not reassemble the original per-move strings."""
        special = {
self.PAD_TOKEN,
self.BOS_TOKEN,
self.EOS_TOKEN,
self.UNK_TOKEN,
}
return " ".join(t for t in tokens if t not in special)
    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """Write the vocabulary to ``vocab.json`` in ``save_directory`` and
        return the file path, as expected by ``save_pretrained``."""
        os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + self.vocab_files_names["vocab_file"],
        )
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
def count_vocab_from_dataset(
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """Count sub-token frequencies over the first ``max_samples`` games of a
    dataset; useful for picking a ``min_frequency`` cutoff before building a
    vocabulary."""
    from collections import Counter
    from datasets import load_dataset
dataset = load_dataset(dataset_name, split=split)
if max_samples is not None:
dataset = dataset.select(range(min(max_samples, len(dataset))))
tokenizer = ChessTokenizer()
token_counts = Counter()
for example in dataset:
for move in example[column].strip().split():
token_counts.update(tokenizer._tokenize(move))
return dict(token_counts)
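if __name__ == "__main__":
    # Minimal smoke test, a sketch only: builds a vocabulary from two
    # hard-coded games (no dataset download) and round-trips a short line.
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPd2d4 BPd7d5 WBc1f4",
    ]
    tok = ChessTokenizer.build_vocab_from_iterator(games)
    ids = tok.encode("WPe2e4 BPe7e5")
    print(ids)
    print(tok.decode(ids))  # prints the space-separated sub-tokens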