# chess_swdo_subTok / tokenizer.py
"""
Decomposed Chess Tokenizer v2 for the Chess Challenge.
This tokenizer decomposes moves into structural components:
- Color (W/B)
- Piece (P/N/B/R/Q/K)
- From square (a1-h8)
- To square (a1-h8)
- Modifiers (capture, check, checkmate, promotion, castling)
This lets the model learn chess structure and generalize better while using
a much smaller vocabulary (88 tokens, 89 with [SEP], vs. 1200+ whole-move
tokens).
"""
from __future__ import annotations
import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
"""
Decomposed chess move tokenizer.
Breaks moves into structural components for better learning.
Example:
>>> tokenizer = ChessTokenizer()
        >>> tokenizer.tokenize("WPe2e4 BPe7e5")
        ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5']
        >>> tokenizer.encode("WNg1f3(+)")
        [4, 7, 60, 54, 77]  # [W], N, g1, f3, +

    Note: encode() does not add [BOS]/[EOS]; the base PreTrainedTokenizer
    appends nothing unless build_inputs_with_special_tokens is overridden.
"""
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
SEP_TOKEN = "[SEP]" # Optional: separate moves
# Chess components
# Use [W] and [B] for colors to avoid collision with piece 'B' (Bishop)
COLORS = ["[W]", "[B]"]
PIECES = ["P", "N", "B", "R", "Q", "K"]
FILES = ["a", "b", "c", "d", "e", "f", "g", "h"]
RANKS = ["1", "2", "3", "4", "5", "6", "7", "8"]
    # Generate all 64 squares, file-major (a1, a2, ..., h8). The rank list is
    # inlined: class attributes such as RANKS are not visible inside the inner
    # loop of a class-body comprehension in Python 3.
    SQUARES = [f + r for f in FILES for r in "12345678"]
# Modifiers
MODIFIERS = [
"x", # Capture
"+", # Check
"#", # Checkmate (alternative to +*)
"+*", # Checkmate (dataset format)
"=Q", # Promotion to Queen
"=R", # Promotion to Rook
"=B", # Promotion to Bishop
"=N", # Promotion to Knight
"O-O", # Kingside castling (alternative)
"O-O-O", # Queenside castling (alternative)
"o", # Kingside castling (dataset format)
"O", # Queenside castling (dataset format)
]
# Regex pattern to parse extended UCI moves
# Format: [W|B][Piece][from_sq][to_sq][promotion]?[suffixes]?
MOVE_PATTERN = re.compile(
r'^([WB])' # Color
r'([PNBRQK])' # Piece
r'([a-h][1-8])' # From square
r'([a-h][1-8])' # To square
r'(=[QRBN])?' # Promotion (optional)
r'(\([xoO+*]+\))?$' # Suffixes in parentheses (optional)
)
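    # Illustrative matches (color, piece, from, to, promotion, suffixes):
    #   "WPe2e4"      -> ('W', 'P', 'e2', 'e4', None, None)
    #   "BPe7e8=Q(+)" -> ('B', 'P', 'e7', 'e8', '=Q', '(+)')
    #   "WKe1g1(o)"   -> ('W', 'K', 'e1', 'g1', None, '(o)')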
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
add_move_separator: bool = False,
**kwargs,
):
"""
Initialize the decomposed chess tokenizer.
Args:
vocab_file: Path to vocabulary JSON file.
vocab: Pre-built vocabulary dictionary.
add_move_separator: Whether to add [SEP] between moves.
"""
self._pad_token = self.PAD_TOKEN
self._bos_token = self.BOS_TOKEN
self._eos_token = self.EOS_TOKEN
self._unk_token = self.UNK_TOKEN
self.add_move_separator = add_move_separator
        # Drop any caller-supplied special tokens so the fixed ones above win
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
# Load or create vocabulary
if vocab is not None:
self._vocab = vocab
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
self._vocab = self._create_vocab()
# Reverse mapping
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
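    # Usage note: ChessTokenizer() uses the fixed built-in vocabulary;
    # ChessTokenizer(add_move_separator=True) also emits [SEP] between moves.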
def _create_vocab(self) -> Dict[str, int]:
"""Create the fixed vocabulary from chess components."""
tokens = []
# Special tokens first
tokens.extend([self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])
if self.add_move_separator:
tokens.append(self.SEP_TOKEN)
# Colors
tokens.extend(self.COLORS)
# Pieces
tokens.extend(self.PIECES)
# Squares (64)
tokens.extend(self.SQUARES)
# Modifiers
tokens.extend(self.MODIFIERS)
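        # Resulting size: 4 special (+1 optional [SEP]) + 2 colors + 6 pieces
        # + 64 squares + 12 modifiers = 88 tokens (89 with the separator)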
return {token: idx for idx, token in enumerate(tokens)}
@property
def vocab_size(self) -> int:
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
return dict(self._vocab)
def _parse_move(self, move: str) -> List[str]:
"""
Parse a single move into component tokens.
Args:
move: Move in extended UCI format (e.g., "WPe2e4", "BNg8f6(x+)")
Returns:
List of component tokens.
"""
match = self.MOVE_PATTERN.match(move)
if not match:
# Fallback: return as unknown
return [self.UNK_TOKEN]
tokens = []
# Color - map 'W' -> '[W]' and 'B' -> '[B]' to avoid collision with piece Bishop
color = match.group(1)
tokens.append(f"[{color}]")
# Piece
tokens.append(match.group(2))
# From square
tokens.append(match.group(3))
# To square
tokens.append(match.group(4))
# Promotion (optional)
if match.group(5):
tokens.append(match.group(5)) # e.g., "=Q"
# Parse suffixes (optional)
if match.group(6):
suffix = match.group(6) # e.g., "(x+)"
# Remove parentheses
suffix_content = suffix[1:-1]
# Parse individual modifiers
if "x" in suffix_content:
tokens.append("x")
if "+*" in suffix_content:
tokens.append("+*")
elif "+" in suffix_content:
tokens.append("+")
            # Membership, not equality: castling can co-occur with other
            # markers in the suffix (e.g. "(o+)", a kingside castle with check)
            if "o" in suffix_content:
                tokens.append("o")
            elif "O" in suffix_content:
                tokens.append("O")
return tokens
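    # e.g. _parse_move("BNg8f6(x+)") -> ['[B]', 'N', 'g8', 'f6', 'x', '+']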
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize a string of moves.
Args:
text: Space-separated moves in extended UCI format.
Returns:
List of component tokens.
"""
tokens = []
moves = text.strip().split()
for i, move in enumerate(moves):
move_tokens = self._parse_move(move)
tokens.extend(move_tokens)
# Add separator between moves (optional)
if self.add_move_separator and i < len(moves) - 1:
tokens.append(self.SEP_TOKEN)
return tokens
def _convert_token_to_id(self, token: str) -> int:
return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
def _convert_id_to_token(self, index: int) -> str:
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert tokens back to a move string.

        A color token marks the start of a new move, so any modifier tokens
        that follow the to-square stay attached to the move they belong to.
        (Flushing as soon as four tokens matched, as an earlier version did,
        would detach trailing modifiers from their move.)
        """
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SEP_TOKEN}
        result = []
        current_move = []
        for token in tokens:
            if token in special:
                if current_move:
                    result.append(self._reconstruct_move(current_move))
                    current_move = []
                continue
            # A color token starts the next move; flush the previous one
            if token in self.COLORS and current_move:
                result.append(self._reconstruct_move(current_move))
                current_move = []
            current_move.append(token)
        # Flush the final move
        if current_move:
            result.append(self._reconstruct_move(current_move))
        return " ".join(result)
    def _is_complete_move(self, tokens: List[str]) -> bool:
        """Check whether tokens form a structurally complete move."""
        if len(tokens) < 4:
            return False
        # Basic move: Color + Piece + From + To
        if not (tokens[0] in self.COLORS and
                tokens[1] in self.PIECES and
                tokens[2] in self.SQUARES and
                tokens[3] in self.SQUARES):
            return False
        # Any trailing tokens must be known modifiers (promotion or suffix)
        return all(t in self.MODIFIERS for t in tokens[4:])
def _reconstruct_move(self, tokens: List[str]) -> str:
"""Reconstruct a move string from component tokens."""
if not tokens:
return ""
# Basic structure: Color + Piece + From + To
if len(tokens) >= 4:
# Convert [W] -> W and [B] -> B for colors
color = tokens[0]
if color in self.COLORS:
color = color[1] # Extract 'W' from '[W]' or 'B' from '[B]'
move = color + "".join(tokens[1:4])
# Add modifiers
suffixes = []
for t in tokens[4:]:
if t.startswith("="):
move += t
elif t in ["x", "+", "+*", "o", "O"]:
suffixes.append(t)
if suffixes:
move += "(" + "".join(suffixes) + ")"
return move
return "".join(tokens)
def save_vocabulary(
self,
save_directory: str,
filename_prefix: Optional[str] = None,
) -> Tuple[str]:
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
# Also save config with auto_map for HuggingFace to find our custom tokenizer
# Format: (slow_tokenizer_class, fast_tokenizer_class) - we don't have a fast version
config = {
"tokenizer_class": "ChessTokenizer",
"auto_map": {
"AutoTokenizer": ["tokenizer.ChessTokenizer", None]
},
"add_move_separator": self.add_move_separator,
"vocab_size": self.vocab_size,
}
config_file = os.path.join(save_directory, "tokenizer_config.json")
with open(config_file, "w", encoding="utf-8") as f:
json.dump(config, f, indent=2)
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
"""Load tokenizer from directory or hub."""
path = Path(pretrained_model_name_or_path)
if path.is_dir():
vocab_file = path / "vocab.json"
config_file = path / "tokenizer_config.json"
add_move_separator = False
if config_file.exists():
            with open(config_file, "r", encoding="utf-8") as f:
config = json.load(f)
add_move_separator = config.get("add_move_separator", False)
return cls(
vocab_file=str(vocab_file) if vocab_file.exists() else None,
add_move_separator=add_move_separator,
**kwargs,
)
# Fallback to HuggingFace hub
from huggingface_hub import hf_hub_download
vocab_file = hf_hub_download(
repo_id=pretrained_model_name_or_path,
filename="vocab.json",
)
return cls(vocab_file=vocab_file, **kwargs)
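

if __name__ == "__main__":
    # Minimal smoke test (illustrative): round-trip a short game through
    # tokenize -> encode -> decode and print each stage.
    tok = ChessTokenizer()
    game = "WPe2e4 BPe7e5 WNg1f3(+)"
    print(tok.tokenize(game))    # ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5', '[W]', 'N', 'g1', 'f3', '+']
    ids = tok.encode(game, add_special_tokens=False)
    print(ids)                   # [4, 6, 45, 47, 5, 6, 50, 48, 4, 7, 60, 54, 77]
    print(tok.decode(ids))       # WPe2e4 BPe7e5 WNg1f3(+)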