| | """ |
| | Custom Chess Tokenizer for the Chess Challenge. |
| | |
| | This tokenizer breaks down moves into 5 components: |
| | Color, Piece, Source, Destination, Suffix. |
| | """ |
| |
|
from __future__ import annotations

import json
import os
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A component-based tokenizer for chess moves.

    Each move is split into 5 tokens:
    [Color, Piece, Source, Destination, Suffix]

    Vocabulary is fixed and deterministic.
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Fixed special tokens.
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Move components. Note that "B" appears as both a color and a piece;
    # the duplicate is removed when the default vocabulary is built.
    COLORS = ["W", "B"]
    PIECES = ["P", "N", "B", "R", "Q", "K"]
    FILES = ["a", "b", "c", "d", "e", "f", "g", "h"]
    RANKS = ["1", "2", "3", "4", "5", "6", "7", "8"]
    # The empty suffix is represented by the "[None]" token in the vocabulary.
    SUFFIXES = ["", "(x)", "(+)", "(+*)", "(o)", "(O)"]

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        # Special tokens are fixed for this tokenizer; drop any overrides
        # the caller may have passed so they cannot drift from the vocabulary.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocabulary: an explicit dict wins, then a vocab file
        # on disk, then the deterministic default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocabulary must exist before the parent constructor runs,
        # because it resolves the special tokens against it.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        # Special tokens come first so they receive the lowest ids.
        tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        tokens.extend(self.COLORS)
        tokens.extend(self.PIECES)

        # All 64 board squares, "a1" through "h8".
        squares = [f"{f}{r}" for f in self.FILES for r in self.RANKS]
        tokens.extend(squares)

        # Suffix tokens; the empty suffix is stored as "[None]".
        for s in self.SUFFIXES:
            if s == "":
                tokens.append("[None]")
            else:
                tokens.append(s)

        # Deduplicate while preserving order ("B" occurs in both COLORS
        # and PIECES).
        seen = set()
        unique_tokens = []
        for t in tokens:
            if t not in seen:
                unique_tokens.append(t)
                seen.add(t)

        return {t: i for i, t in enumerate(unique_tokens)}

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        # The input is a whitespace-separated sequence of encoded moves.
        moves = text.strip().split()
        tokens = []

        for move in moves:
            # Special tokens are repeated 5 times, keeping the
            # 5-tokens-per-move alignment of the sequence.
            if move in [self.BOS_TOKEN, self.EOS_TOKEN, self.PAD_TOKEN, self.UNK_TOKEN]:
                tokens.extend([move] * 5)
                continue

            # Expected encoding: <Color><Piece><Source><Destination><Suffix>,
            # e.g. "WPe2e4" or "BNg8f6(x)". Anything shorter than the 6
            # mandatory characters cannot be parsed.
            if len(move) < 6:
                tokens.extend([self.UNK_TOKEN] * 5)
                continue

            c = move[0]
            p = move[1]
            src = move[2:4]
            dst = move[4:6]
            suf = move[6:]

            # The empty suffix maps to the dedicated "[None]" token.
            if suf == "":
                suf_tok = "[None]"
            else:
                suf_tok = suf

            raw_components = [c, p, src, dst, suf_tok]

            # Replace any out-of-vocabulary component with [UNK].
            final_components = []
            for comp in raw_components:
                if comp in self._vocab:
                    final_components.append(comp)
                else:
                    final_components.append(self.UNK_TOKEN)

            tokens.extend(final_components)

        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Reassemble moves from consecutive 5-token chunks; a trailing
        # partial chunk is dropped.
        output = []

        for i in range(0, len(tokens), 5):
            chunk = tokens[i:i + 5]
            if len(chunk) < 5:
                break

            # Skip chunks that encode special tokens.
            if chunk[0] in [self.BOS_TOKEN, self.EOS_TOKEN, self.PAD_TOKEN]:
                continue

            c, p, src, dst, suf = chunk
            if suf == "[None]":
                suf = ""

            output.append(f"{c}{p}{src}{dst}{suf}")

        return " ".join(output)

    @classmethod
    def build_vocab_from_dataset(cls, *args, **kwargs):
        # The vocabulary is fixed and deterministic, so the dataset is
        # ignored; a default instance is always equivalent.
        return cls()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
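

# Minimal usage sketch (illustrative, not part of the tokenizer itself):
# the move strings below are assumed examples of the component encoding,
# and the round trip relies only on methods defined above.
if __name__ == "__main__":
    tok = ChessTokenizer()
    moves = "WPe2e4 BPe7e5 WNg1f3(+)"
    tokens = tok.tokenize(moves)  # 5 tokens per move
    ids = tok.convert_tokens_to_ids(tokens)
    # Decoding the same tokens should reproduce the input string exactly.
    assert tok.convert_tokens_to_string(tokens) == moves
    print(tokens)
    print(ids)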