from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    Chess tokenizer with structured move tokens.

    Each move is split into: [side][piece][from][to][suffixes].

    Example:
        "WPe2e4 BNg8f6x+" -> [W][P][e2][e4] [B][N][g8][f6][x][+]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    MOVE_RE = re.compile(
        r"^(?P<side>[WB])"
        r"(?P<piece>[PNBRQK])"
        r"(?P<src>[a-h][1-8])"
        r"(?P<dst>[a-h][1-8])"
        r"(?P<suffix>.*)$"
    )
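    # Illustrative match (the move below is a made-up sample in this
    # encoding): "WPe7e8=Q" matches with side="W", piece="P", src="e7",
    # dst="e8", suffix="=Q".
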
    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Always use this tokenizer's fixed special tokens, even if the
        # caller passed different ones.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must be in place before super().__init__, which may
        # query it (e.g. via get_vocab) while registering special tokens.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        sides = ["[W]", "[B]"]
        pieces = ["[P]", "[N]", "[B]", "[R]", "[Q]", "[K]"]
        squares = [f"[{f}{r}]" for f in "abcdefgh" for r in "12345678"]
        suffixes = ["[x]", "[+]", "[#]", "[O-O]", "[O-O-O]",
                    "[prom_Q]", "[prom_R]", "[prom_B]", "[prom_N]"]

        # "[B]" appears both as the Black side marker and as the bishop
        # piece, so deduplicate while assigning ids; a plain enumerate over
        # the concatenated list would leave a gap in the id space.
        # Total: 4 special + 2 sides + 6 pieces + 64 squares + 9 suffixes
        # = 85 entries, 84 unique tokens after deduplication.
        vocab: Dict[str, int] = {}
        for tok in special + sides + pieces + squares + suffixes:
            if tok not in vocab:
                vocab[tok] = len(vocab)
        return vocab

    @classmethod
    def build_vocab_from_iterator(cls, iterator, min_frequency: int = 1) -> "ChessTokenizer":
        from collections import Counter

        token_counts = Counter()
        tokenizer = cls()

        for game in iterator:
            token_counts.update(tokenizer._tokenize(game))

        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        # _tokenize can emit [UNK] for unparseable moves; exclude the special
        # tokens from the counted set so none of them gets a second id.
        tokens = sorted(
            t for t, c in token_counts.items()
            if c >= min_frequency and t not in special
        )

        vocab = {tok: i for i, tok in enumerate(special + tokens)}
        return cls(vocab=vocab)
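
    # Usage sketch (the game string is a made-up sample in this encoding):
    #   tok = ChessTokenizer.build_vocab_from_iterator(["WPe2e4 BPe7e5"])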

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
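
    # Usage sketch (requires the `datasets` package and network access to
    # fetch the corpus; the sample count is illustrative):
    #   tok = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000)
    #   tok.save_pretrained("chess_tokenizer")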

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        tokens: List[str] = []

        moves = text.strip().split()
        for move in moves:
            # Castling moves carry no from/to squares; emit side + castle
            # token. Check "O-O-O" before "O-O", since the latter is a
            # substring of the former.
            if "O-O-O" in move:
                tokens.append("[W]" if move.startswith("W") else "[B]")
                tokens.append("[O-O-O]")
                continue
            if "O-O" in move:
                tokens.append("[W]" if move.startswith("W") else "[B]")
                tokens.append("[O-O]")
                continue

            m = self.MOVE_RE.match(move)
            if not m:
                tokens.append(self.UNK_TOKEN)
                continue

            tokens.append(f"[{m.group('side')}]")
            tokens.append(f"[{m.group('piece')}]")
            tokens.append(f"[{m.group('src')}]")
            tokens.append(f"[{m.group('dst')}]")

            suffix = m.group("suffix")
            if "x" in suffix:
                tokens.append("[x]")
            if "+" in suffix:
                tokens.append("[+]")
            if "#" in suffix:
                tokens.append("[#]")
            if "=" in suffix:
                # Keep only the piece letter so a trailing "+" or "#"
                # (e.g. "=Q+") does not leak into the promotion token.
                promo = suffix.split("=")[-1][:1].upper()
                tokens.append(f"[prom_{promo}]")

        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
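

if __name__ == "__main__":
    # Minimal smoke test; the game string is a made-up sample in the move
    # encoding this tokenizer expects, not taken from any real dataset.
    tok = ChessTokenizer()
    game = "WPe2e4 BPe7e5 WNg1f3 BNb8c6"
    ids = tok.encode(game, add_special_tokens=False)
    print(tok.convert_ids_to_tokens(ids))
    # decode() drops special tokens and joins the remaining tokens with
    # spaces, e.g. "[W] [P] [e2] [e4] ...".
    print(tok.decode(ids))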