"""
Custom Chess Tokenizer for the Chess Challenge.

Strategy: Semantic Split (Piece, Square, Suffix),
e.g. "WPe2e4" -> ["WP", "e2", "e4"].
"""

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """Word-level tokenizer over a fixed chess vocabulary: pieces, squares, and move suffixes."""

    model_input_names = ["input_ids", "attention_mask"]

    # Special tokens.
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # 12 piece tokens: colour (W/B) + piece letter.
    PIECES = [
        "WP", "WN", "WB", "WR", "WQ", "WK",
        "BP", "BN", "BB", "BR", "BQ", "BK",
    ]

    # 64 board squares, "a1" through "h8".
    SQUARES = [f"{c}{r}" for c in "abcdefgh" for r in "12345678"]

    # Move suffixes: annotation markers plus promotion piece letters.
    SUFFIXES = [
        "(x)", "(+)", "(+*)", "(o)", "(O)",
        "q", "r", "b", "n", "Q", "R", "B", "N",
    ]

    def __init__(self, **kwargs):
        # Pre-set the private special-token attributes; the parent constructor
        # may consult them before they are registered via the kwargs below.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any special-token kwargs so they are not passed twice to super().
        for token in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(token, None)

        # Fixed vocabulary: special tokens first (ids 0-3), then pieces,
        # squares, and suffixes.
        self.all_tokens = (
            [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
            + self.PIECES
            + self.SQUARES
            + self.SUFFIXES
        )
        self._vocab = {token: idx for idx, token in enumerate(self.all_tokens)}
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Sort escaped suffixes longest-first so "(+*)" wins over "(+)".
        escaped_suffixes = [re.escape(s) for s in self.SUFFIXES]
        suffix_pattern = "|".join(sorted(escaped_suffixes, key=len, reverse=True))

        # Alternation order matters: pieces before squares before bare
        # promotion letters, so e.g. "b2" is a square, not a "b" suffix.
        self.token_pattern = re.compile(
            r"([WB][PNBRQK])|([a-h][1-8])|(" + suffix_pattern + r")"
        )
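
        # Illustrative example (hypothetical move string, not a fixed format):
        #   self.token_pattern.findall("WPe2e4(x)")
        #   -> [("WP", "", ""), ("", "e2", ""), ("", "e4", ""), ("", "", "(x)")]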

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Split a game string with the token regex.

        Example: "WPe2e4" -> ["WP", "e2", "e4"]

        Characters that match no token (e.g. whitespace) are skipped.
        """
        matches = self.token_pattern.findall(text)
        return [token for group in matches for token in group if token]

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab[self.UNK_TOKEN])

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Space-join the non-special tokens. Note this is lossy with respect
        # to the raw input: "WPe2e4" decodes as "WP e2 e4".
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json"
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
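
    # Note: the vocabulary is fixed and rebuilt in __init__, so the saved
    # vocab.json is informational; loading the tokenizer does not depend on it.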

    @classmethod
    def build_vocab_from_dataset(cls, *args, **kwargs) -> "ChessTokenizer":
        """
        Override: return a pre-initialized tokenizer with the fixed vocab.

        No dataset scan is needed because the rules of chess fix the token set.
        """
        print("Using fixed vocabulary (Pieces + Squares + Suffixes). No dataset scan needed.")
        return cls()
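

# Minimal usage sketch. The game string below is a hypothetical example of the
# move encoding; the real Chess Challenge data format may differ.
if __name__ == "__main__":
    tokenizer = ChessTokenizer()
    print(f"Vocab size: {tokenizer.vocab_size}")  # 4 specials + 12 + 64 + 13 = 93

    game = "WPe2e4 BPe7e5"  # hypothetical move string
    ids = tokenizer.encode(game, add_special_tokens=False)
    print(tokenizer.convert_ids_to_tokens(ids))  # ['WP', 'e2', 'e4', 'BP', 'e7', 'e5']
    print(tokenizer.decode(ids))                 # WP e2 e4 BP e7 e5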