from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    Ultra-simple square tokenizer.

    Vocab (68 tokens):
      - 4 specials: [PAD] [BOS] [EOS] [UNK]
      - 64 squares: a1..h8

    Tokenization:
      - Extracts every board square ("a1".."h8") found in the text, in order.
      - Accepts decorated or plain move strings:
          "WPe2e4(x+)", "e2e4", "e2 e4" -> ["e2", "e4"]
      - For longer histories, ALL squares are emitted in order of appearance.

    Decoding:
      - Joins square tokens with single spaces, so a downstream evaluator
        regex can match them easily.
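
    Example (an illustrative round-trip, assuming the default fixed vocab):
        >>> tok = ChessTokenizer()
        >>> tok._tokenize("WPe2e4(x+)")
        ['e2', 'e4']
        >>> tok.convert_tokens_to_string(["e2", "e4"])
        'e2 e4'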
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    _SQUARE_PATTERN = r"[a-h][1-8]"
    _SQUARE_RE = re.compile(_SQUARE_PATTERN)

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Force the fixed special tokens; drop any caller-supplied overrides.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocab: explicit dict > vocab.json on disk > fixed default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_fixed_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before super().__init__(), which may call
        # get_vocab() while registering the special tokens.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    @classmethod
    def _create_fixed_vocab(cls) -> Dict[str, int]:
        specials = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        files = "abcdefgh"
        ranks = "12345678"
        # Rank-major order: a1, b1, ..., h1, a2, ..., h8.
        squares = [f + r for r in ranks for f in files]
        tokens = specials + squares
        # e.g. {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2, "[UNK]": 3, "a1": 4, ...}
        return {tok: i for i, tok in enumerate(tokens)}

    @classmethod
    def build_vocab_from_iterator(cls, iterator, **kwargs) -> "ChessTokenizer":
        # The vocab is fixed, so any iterator/dataset input is ignored.
        return cls(vocab=cls._create_fixed_vocab())

    @classmethod
    def build_vocab_from_dataset(cls, *args, **kwargs) -> "ChessTokenizer":
        return cls(vocab=cls._create_fixed_vocab())

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        text = text.strip()
        if not text:
            return []

        # Pass special tokens through unchanged.
        if text in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
            return [text]

        # Extract every square substring, in order of appearance.
        squares = self._SQUARE_RE.findall(text)
        if not squares:
            # No squares at all -> single UNK token.
            return [self.UNK_TOKEN]

        # Keep only squares present in the vocab (all of them, for the fixed vocab).
        out = [sq for sq in squares if sq in self._vocab]
        return out if out else [self.UNK_TOKEN]

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab[self.UNK_TOKEN])

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Drop specials so the decoded string is just space-separated squares.
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        toks = [t for t in tokens if t not in special]
        return " ".join(toks)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
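

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the class API):
    # build the default tokenizer, encode a decorated move string, and decode
    # it back to space-separated squares.
    tok = ChessTokenizer()
    print(tok.vocab_size)  # 68
    ids = tok.encode("WPe2e4(x+)", add_special_tokens=False)
    print(ids)  # ids of "e2" and "e4"
    print(tok.decode(ids))  # "e2 e4"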