# chess-tomin-v2bis / tokenizer.py
from __future__ import annotations
import json
import os
from typing import Dict, List, Optional, Tuple
from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """Word-level tokenizer for chess games encoded as piece/from/to move
    triplets, e.g. "WPe2e4" -> ["WP", "e2", "e4"]."""

    model_input_names = ["input_ids", "attention_mask"]

# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
    # Required so .from_pretrained() can locate and load the vocab file automatically
    vocab_files_names = {
        "vocab_file": "vocab.json",
    }

def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
        # Drop any caller-supplied special tokens so the class constants
        # defined above always take precedence
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)
self.vocab_file = vocab_file
        # Resolve the vocab: explicit dict first, then a vocab file on disk,
        # then a minimal default containing only the special tokens
if vocab is not None:
self._vocab = vocab
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
self._vocab = self._create_default_vocab()
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
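        # Note: the vocab must be populated before super().__init__() runs,
        # since the base class registers the special tokens, which involves
        # lookups against this tokenizer's vocab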
super().__init__(
pad_token=self.PAD_TOKEN,
bos_token=self.BOS_TOKEN,
eos_token=self.EOS_TOKEN,
unk_token=self.UNK_TOKEN,
**kwargs,
)

    def _create_default_vocab(self) -> Dict[str, int]:
        special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {t: i for i, t in enumerate(special)}

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        tokens = []
        raw_moves = text.strip().split()
        for move in raw_moves:
            if len(move) >= 6:
                # 1. Piece, e.g. "WP"
                tokens.append(move[:2])
                # 2. From-square, e.g. "e2"
                tokens.append(move[2:4])
                # 3. To-square, e.g. "e4" or "e4(x)" - keep the remainder
                tokens.append(move[4:])
            else:
                # Too short to be a piece/from/to triplet
                tokens.append(self.UNK_TOKEN)
        return tokens
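    # Example, assuming moves are encoded as piece + from + to (e.g. "WPe2e4"):
    #   tokenizer._tokenize("WPe2e4 BNg8f6")
    #   -> ["WP", "e2", "e4", "BN", "g8", "f6"]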

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
# Filter specials
filtered = [t for t in tokens if t not in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]]
# Join with space. Result: "WP e2 e4 BN g8 f6"
# The evaluator regex will handle this fine.
return " ".join(filtered)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)

    @classmethod
    def build_vocab_from_dataset(cls, dataset_name="dlouapre/lichess_2025-01_1M", split="train", max_samples=10000):
        """Build a vocabulary by streaming up to max_samples games from the dataset."""
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split, streaming=True)
        unique_tokens = set()
        print("Building vocabulary...")
        count = 0
for example in dataset:
moves = example["text"].split()
for move in moves:
if len(move) >= 6:
unique_tokens.add(move[:2]) # Piece
unique_tokens.add(move[2:4]) # From
unique_tokens.add(move[4:]) # To (includes suffix like (x))
count += 1
if count >= max_samples:
break
special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        # Sort tokens to ensure deterministic IDs
        all_tokens = special + sorted(unique_tokens)
vocab = {token: idx for idx, token in enumerate(all_tokens)}
return cls(vocab=vocab)
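

if __name__ == "__main__":
    # Minimal usage sketch (assumptions: network access to stream the dataset;
    # max_samples=1000 is just an illustrative choice)
    tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=1000)
    tokenizer.save_pretrained("./chess_tokenizer")

    # Round trip a short game fragment. Tokens missing from the vocab map to
    # [UNK] and are filtered out of the decoded string.
    encoded = tokenizer("WPe2e4 BNg8f6")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))  # "WP e2 e4 BN g8 f6"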