"""
Custom Chess Tokenizer for the Chess Challenge.
Move-level tokenizer:
- Each move string is ONE token, e.g. "WPe2e4", "BNg8f6", "WBb5c6(x)".
Key improvement vs baseline:
- Adds `max_vocab_size` to cap vocabulary size (very important for <1M params).
- Keeps the TOP-K most frequent moves (after min_frequency filter).
- Registers for AutoTokenizer so server-side loading works.
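
Example (an illustrative sketch; the move strings here are made up):
    tok = ChessTokenizer.build_vocab_from_iterator(["WPe2e4 BPe7e5"], max_vocab_size=64)
    ids = tok("WPe2e4 BPe7e5")["input_ids"]
    text = tok.decode(ids)  # back to "WPe2e4 BPe7e5"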
"""

from __future__ import annotations

import json
import os
from collections import Counter
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        # Set special tokens before super().__init__ so the base class sees them.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any special-token kwargs (e.g. coming from a saved
        # tokenizer_config.json) so they are not passed twice below.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load vocab: explicit dict > vocab file on disk > minimal default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before super().__init__, which may query
        # vocab_size / get_vocab during initialization.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        # Fallback vocab containing only the special tokens.
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {tok: i for i, tok in enumerate(special_tokens)}

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
        max_vocab_size: int = 5000,
    ) -> "ChessTokenizer":
"""
Build vocabulary from an iterator of game strings.
Strategy:
- Count move frequency
- Filter by min_frequency
- Take TOP-K most frequent moves (K = max_vocab_size - #special_tokens)
This avoids vocab explosion (which breaks the <1M parameter constraint).
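
        Example (illustrative, tiny in-memory corpus):
            >>> games = ["WPe2e4 BPe7e5 WNg1f3", "WPe2e4 BNg8f6"]
            >>> tok = ChessTokenizer.build_vocab_from_iterator(games, max_vocab_size=16)
            >>> tok.vocab_size <= 16
            True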
"""
        token_counts = Counter()
        for game in iterator:
            moves = game.strip().split()
            token_counts.update(moves)

        # Filter by frequency
        items = [(tok, c) for tok, c in token_counts.items() if c >= min_frequency]
        # Sort by frequency (desc), then token, for reproducibility
        items.sort(key=lambda x: (-x[1], x[0]))

        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        budget = max_vocab_size - len(special_tokens)
        if budget <= 0:
            raise ValueError("max_vocab_size must be > number of special tokens")
        top_tokens = [tok for tok, _ in items[:budget]]
        vocab = {tok: i for i, tok in enumerate(special_tokens + top_tokens)}
        return cls(vocab=vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
        max_vocab_size: int = 5000,
    ) -> "ChessTokenizer":
"""
Build vocabulary from a HF dataset.
IMPORTANT:
- `max_vocab_size` caps final vocab size (including special tokens).
- `min_frequency` filters extremely rare moves before top-k selection.
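
        Example (sketch; requires network access to the Hub):
            tok = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000)
            tok.save_pretrained("./chess_tokenizer")
        (`save_pretrained` is inherited from PreTrainedTokenizer and calls
        `save_vocabulary` below.)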
"""
        # Local import: `datasets` is only needed at vocab-build time.
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(
            game_iterator(),
            min_frequency=min_frequency,
            max_vocab_size=max_vocab_size,
        )

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        # One whitespace-separated move string == one token.
        return text.strip().split()

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab[self.UNK_TOKEN])

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Drop special tokens when detokenizing back to a game string.
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)


# IMPORTANT for server-side loading on HF: records this class in the tokenizer
# config's auto_map so AutoTokenizer.from_pretrained(..., trust_remote_code=True)
# can resolve it.
ChessTokenizer.register_for_auto_class("AutoTokenizer")
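

# Minimal smoke test (an illustrative sketch, not part of the submission
# pipeline). The games below are made-up move strings in the format the
# tokenizer expects.
if __name__ == "__main__":
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPe2e4 BNg8f6 WPe4e5",
    ]
    tok = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1, max_vocab_size=64)
    enc = tok("WPe2e4 BPe7e5")
    print(enc["input_ids"])  # token ids for the two moves
    # Round-trip: ids -> tokens -> game string (special tokens are dropped).
    print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(enc["input_ids"])))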