"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer decomposes moves into atomic tokens:
Piece -> Source Square -> Target Square -> Suffixes.

Example: "WPe2e4" -> ['P', 'e2', 'e4'] (color is implicit to save context)
Example: "WBc8b7(x)(+)" -> ['B', 'c8', 'b7', '(x)', '(+)'] (capture and check markers)
"""

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    PIECES = ["P", "N", "B", "R", "Q", "K"]
    FILES = "abcdefgh"
    RANKS = "12345678"
    # Move suffix markers, e.g. "(x)" for a capture and "(+)" for a check
    # (see the module docstring example).
    SUFFIXES = ["(x)", "(+)", "(+*)", "(o)", "(O)", "(=)"]

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens; this tokenizer fixes its own.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # The vocabulary must exist before super().__init__, which resolves
        # special tokens to ids via _convert_token_to_id.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.isfile(vocab_file):
            # Load a previously saved vocab (e.g. via from_pretrained).
            with open(vocab_file, encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_fixed_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )
    def _create_fixed_vocab(self) -> Dict[str, int]:
        """Creates the fixed vocabulary of exactly 80 atomic tokens:
        4 special + 6 pieces + 64 squares + 6 suffixes.
        """
        vocab = {}
        idx = 0

        # Special tokens first, so [PAD] gets id 0.
        for token in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]:
            vocab[token] = idx
            idx += 1

        # Piece tokens (color is implicit, see module docstring).
        for p in self.PIECES:
            vocab[p] = idx
            idx += 1

        # One token per board square: a1 .. h8.
        for f in self.FILES:
            for r in self.RANKS:
                vocab[f"{f}{r}"] = idx
                idx += 1

        # Move suffix markers.
        for s in self.SUFFIXES:
            vocab[s] = idx
            idx += 1

        return vocab
    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        **kwargs,
    ) -> "ChessTokenizer":
        """
        Override: returns the tokenizer with the fixed vocabulary immediately.
        The dataset no longer needs to be scanned; `dataset_name` is kept only
        for API compatibility.
        """
        print("Initializing Fixed Vocabulary Tokenizer (Deconstructed Strategy)...")
        return cls(**kwargs)

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)
    def _tokenize(self, text: str) -> List[str]:
        """
        Decomposes move strings into atomic tokens.
        Input:  "[BOS] WPe2e4 BNg8f6"
        Output: ['[BOS]', 'P', 'e2', 'e4', 'N', 'g8', 'f6']
        """
        tokens = []
        moves = text.strip().split()

        special_tokens = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}

        for move in moves:
            if not move:
                continue

            # Pass special tokens through unchanged.
            if move in special_tokens:
                tokens.append(move)
                continue

            # Well-formed move: color, piece, source square, target square, suffixes.
            match = re.match(r"([WB])([PNBRQK])([a-h][1-8])([a-h][1-8])(.*)", move)

            if match:
                _, piece, src, dst, suffix = match.groups()
                tokens.extend([piece, src, dst])
                if suffix:
                    # A move can carry several suffixes, e.g. "(x)(+)";
                    # split them into individual known tokens.
                    for s in re.findall(r"\([^)]*\)", suffix):
                        if s in self._vocab:
                            tokens.append(s)
            else:
                # Fallback for malformed moves: salvage whatever atoms we can.
                found_any = False

                # First piece letter found wins.
                for p in self.PIECES:
                    if p in move:
                        tokens.append(p)
                        found_any = True
                        break

                # All squares, in order of appearance.
                squares = re.findall(r"[a-h][1-8]", move)
                tokens.extend(squares)
                if squares:
                    found_any = True

                # Any known suffix markers.
                for s in self.SUFFIXES:
                    if s in move:
                        tokens.append(s)
                        found_any = True

                if not found_any:
                    tokens.append(self.UNK_TOKEN)

        return tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Lossy by design: color prefixes and per-move grouping are dropped
        # during tokenization, so atoms are simply joined with spaces.
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json"
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
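

# A minimal usage sketch, not part of the tokenizer itself: the sample moves
# below are illustrative and assume the "WPe2e4"-style encoding described in
# the module docstring, not actual dataset records.
if __name__ == "__main__":
    tokenizer = ChessTokenizer()
    print(f"Vocab size: {tokenizer.vocab_size}")  # 80

    # Tokenize a short opening sequence; color prefixes are dropped.
    game = "[BOS] WPe2e4 BPe7e5 WNg1f3"
    tokens = tokenizer.tokenize(game)
    print(tokens)  # ['[BOS]', 'P', 'e2', 'e4', 'P', 'e7', 'e5', 'N', 'g1', 'f3']

    # Round-trip through ids; decoding skips special tokens.
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokenizer.decode(ids, skip_special_tokens=True))  # P e2 e4 P e7 e5 N g1 f3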