"""
Decomposed Chess Tokenizer v2 for the Chess Challenge.

This tokenizer decomposes moves into structural components:
- Color (W/B)
- Piece (P/N/B/R/Q/K)
- From square (a1-h8)
- To square (a1-h8)
- Modifiers (capture, check, checkmate, promotion, castling)

This allows the model to learn chess structure and generalize better
while using a much smaller vocabulary (~90 tokens vs ~1200+).
"""

from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    Decomposed chess move tokenizer.

    Breaks moves into structural components for better learning.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokens = tokenizer.tokenize("WPe2e4 BPe7e5")
        >>> print(tokens)
        ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5']

        >>> tokenizer.encode("WNg1f3(+)")
        [4, 7, 60, 54, 77]  # [W], N, g1, f3, +
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    SEP_TOKEN = "[SEP]"

    # Move components. Colors are bracketed so "[B]" (black) cannot be
    # confused with "B" (bishop).
    COLORS = ["[W]", "[B]"]
    PIECES = ["P", "N", "B", "R", "Q", "K"]
    FILES = ["a", "b", "c", "d", "e", "f", "g", "h"]
    RANKS = ["1", "2", "3", "4", "5", "6", "7", "8"]

    # All 64 squares in file-major order: a1, a2, ..., a8, b1, ..., h8.
    SQUARES = [f + r for f in FILES for r in RANKS]

    # Modifier tokens. "#", "O-O", and "O-O-O" are reserved in the
    # vocabulary but are never emitted by _parse_move below.
    MODIFIERS = [
        "x",       # capture
        "+",       # check
        "#",       # checkmate
        "+*",
        "=Q",      # promotions
        "=R",
        "=B",
        "=N",
        "O-O",     # kingside castling
        "O-O-O",   # queenside castling
        "o",
        "O",
    ]

    # Extended-UCI move grammar, e.g. "WPe2e4" or "BPe7e8=Q(x+)".
    MOVE_PATTERN = re.compile(
        r'^([WB])'             # color
        r'([PNBRQK])'          # piece
        r'([a-h][1-8])'        # from square
        r'([a-h][1-8])'        # to square
        r'(=[QRBN])?'          # optional promotion
        r'(\([xoO+*]+\))?$'    # optional parenthesized modifier suffix
    )
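
    # For instance, MOVE_PATTERN.match("BPe7e8=Q(x+)").groups() yields
    # ('B', 'P', 'e7', 'e8', '=Q', '(x+)').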

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        add_move_separator: bool = False,
        **kwargs,
    ):
        """
        Initialize the decomposed chess tokenizer.

        Args:
            vocab_file: Path to vocabulary JSON file.
            vocab: Pre-built vocabulary dictionary (takes precedence over
                vocab_file).
            add_move_separator: Whether to add [SEP] between moves.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        self.add_move_separator = add_move_separator

        # The special tokens are fixed; drop any caller overrides so they
        # cannot conflict with the built-in vocabulary.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocabulary: explicit dict, then file, then built-in.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before super().__init__(), which may query it.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_vocab(self) -> Dict[str, int]:
        """Create the fixed vocabulary from chess components."""
        tokens = []

        # Special tokens first, so [PAD] gets id 0.
        tokens.extend([self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])
        if self.add_move_separator:
            tokens.append(self.SEP_TOKEN)

        tokens.extend(self.COLORS)
        tokens.extend(self.PIECES)
        tokens.extend(self.SQUARES)
        tokens.extend(self.MODIFIERS)

        return {token: idx for idx, token in enumerate(tokens)}
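
    # Size check (with the defaults above): 4 specials + 2 colors +
    # 6 pieces + 64 squares + 12 modifiers = 88 ids, or 89 when
    # add_move_separator adds [SEP].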

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _parse_move(self, move: str) -> List[str]:
        """
        Parse a single move into component tokens.

        Args:
            move: Move in extended UCI format (e.g., "WPe2e4", "BNg8f6(x+)").

        Returns:
            List of component tokens, or [UNK] if the move does not parse.
        """
        match = self.MOVE_PATTERN.match(move)
        if not match:
            return [self.UNK_TOKEN]

        tokens = []

        # Color, bracketed to match the vocabulary entry.
        color = match.group(1)
        tokens.append(f"[{color}]")

        # Piece, from square, to square.
        tokens.append(match.group(2))
        tokens.append(match.group(3))
        tokens.append(match.group(4))

        # Optional promotion, e.g. "=Q".
        if match.group(5):
            tokens.append(match.group(5))

        # Optional modifier suffix, e.g. "(x+)".
        if match.group(6):
            suffix_content = match.group(6)[1:-1]  # strip the parentheses

            if "x" in suffix_content:
                tokens.append("x")
            if "+*" in suffix_content:
                tokens.append("+*")
            elif "+" in suffix_content:
                tokens.append("+")
            # Castling markers are only recognized when they stand alone.
            if suffix_content == "o":
                tokens.append("o")
            elif suffix_content == "O":
                tokens.append("O")

        return tokens

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves.

        Args:
            text: Space-separated moves in extended UCI format.

        Returns:
            List of component tokens.
        """
        tokens = []
        moves = text.strip().split()

        for i, move in enumerate(moves):
            tokens.extend(self._parse_move(move))

            if self.add_move_separator and i < len(moves) - 1:
                tokens.append(self.SEP_TOKEN)

        return tokens
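
    # With add_move_separator=True the stream carries explicit move
    # boundaries, e.g. _tokenize("WPe2e4 BPe7e5")
    #   -> ['[W]', 'P', 'e2', 'e4', '[SEP]', '[B]', 'P', 'e7', 'e5']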

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert tokens back to a move string.

        Reconstructs moves from component tokens; a new move starts at
        each color token.
        """
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SEP_TOKEN}

        result = []
        current_move = []

        for token in tokens:
            if token in special:
                if current_move:
                    result.append(self._reconstruct_move(current_move))
                    current_move = []
                continue

            # A color token opens the next move. Flushing here (rather
            # than as soon as four base tokens accumulate) keeps trailing
            # modifier tokens attached to their move.
            if token in self.COLORS and current_move:
                result.append(self._reconstruct_move(current_move))
                current_move = []

            current_move.append(token)

        if current_move:
            result.append(self._reconstruct_move(current_move))

        return " ".join(result)

    def _is_complete_move(self, tokens: List[str]) -> bool:
        """Check whether tokens form a complete, well-formed move."""
        if len(tokens) < 4:
            return False

        # Base move: color, piece, from square, to square.
        if not (tokens[0] in self.COLORS
                and tokens[1] in self.PIECES
                and tokens[2] in self.SQUARES
                and tokens[3] in self.SQUARES):
            return False

        # Anything after the base move must be a known modifier.
        return all(t in self.MODIFIERS for t in tokens[4:])

    def _reconstruct_move(self, tokens: List[str]) -> str:
        """Reconstruct a move string from component tokens."""
        if not tokens:
            return ""

        if len(tokens) >= 4:
            # Unwrap the bracketed color token ("[W]" -> "W").
            color = tokens[0]
            if color in self.COLORS:
                color = color[1]

            move = color + "".join(tokens[1:4])

            # Promotions attach directly; other modifiers are collected
            # into a parenthesized suffix.
            suffixes = []
            for t in tokens[4:]:
                if t.startswith("="):
                    move += t
                elif t in ["x", "+", "+*", "o", "O"]:
                    suffixes.append(t)

            if suffixes:
                move += "(" + "".join(suffixes) + ")"

            return move

        # Fallback for fragments too short to form a move.
        return "".join(tokens)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        # Also write a tokenizer config so the class (and its settings)
        # can be re-created from the saved directory.
        config = {
            "tokenizer_class": "ChessTokenizer",
            "auto_map": {
                "AutoTokenizer": ["tokenizer.ChessTokenizer", None]
            },
            "add_move_separator": self.add_move_separator,
            "vocab_size": self.vocab_size,
        }
        config_file = os.path.join(save_directory, "tokenizer_config.json")
        with open(config_file, "w", encoding="utf-8") as f:
            json.dump(config, f, indent=2)

        return (vocab_file,)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the tokenizer from a local directory or the Hugging Face Hub."""
        path = Path(pretrained_model_name_or_path)

        if path.is_dir():
            vocab_file = path / "vocab.json"
            config_file = path / "tokenizer_config.json"

            add_move_separator = False
            if config_file.exists():
                with open(config_file, "r") as f:
                    config = json.load(f)
                add_move_separator = config.get("add_move_separator", False)

            return cls(
                vocab_file=str(vocab_file) if vocab_file.exists() else None,
                add_move_separator=add_move_separator,
                **kwargs,
            )

        # Not a local directory: fetch the vocabulary from the Hub.
        from huggingface_hub import hf_hub_download

        vocab_file = hf_hub_download(
            repo_id=pretrained_model_name_or_path,
            filename="vocab.json",
        )
        return cls(vocab_file=vocab_file, **kwargs)
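
# A typical save/load round trip (paths are illustrative):
#   tok = ChessTokenizer()
#   tok.save_vocabulary("./chess-tokenizer")
#   tok2 = ChessTokenizer.from_pretrained("./chess-tokenizer")
#   assert tok2.get_vocab() == tok.get_vocab()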