"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer treats each move as a single token, using the extended UCI
notation from the Lichess dataset (e.g., WPe2e4, BNg8f6).

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""

from __future__ import annotations

import json
import os
import re

from transformers import PreTrainedTokenizer


# A move core is a source square followed by a destination square (e.g., e2e4);
# an optional '=' plus N/B/R/Q marks a promotion.
MOVE_RE = re.compile(r"([a-h][1-8])([a-h][1-8])")
PROMO_RE = re.compile(r"=([NBRQ])")


def normalize_move(tok: str) -> str:
    """Turn 'WPe2e4(x)' into 'WPe2e4' to reduce the vocabulary size.

    Capture, check, checkmate, and castling suffixes are dropped; the
    side/piece prefix, both squares, and any promotion piece are kept.
    """
    m = MOVE_RE.search(tok)
    if not m:
        return tok

    fr, to = m.group(1), m.group(2)

    promo = ""
    pm = PROMO_RE.search(tok)
    if pm:
        promo = "=" + pm.group(1)

    # Fall back to "WP" for malformed tokens shorter than the 2-char prefix.
    prefix = tok[:2] if len(tok) >= 2 else "WP"
    return f"{prefix}{fr}{to}{promo}"


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(self, vocab_file=None, vocab=None, **kwargs):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens; this tokenizer hardcodes its own.
        for t in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(t, None)

        if vocab:
            self._vocab = vocab
        elif vocab_file:
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Minimal vocabulary: special tokens only.
            self._vocab = {
                t: i
                for i, t in enumerate(
                    [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
                )
            }

        # The vocab must exist before super().__init__(), which may call
        # get_vocab()/_tokenize() while registering special tokens.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
        super().__init__(
            pad_token=self.PAD_TOKEN,
            bos_token=self.BOS_TOKEN,
            eos_token=self.EOS_TOKEN,
            unk_token=self.UNK_TOKEN,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self._vocab)

    def get_vocab(self):
        return dict(self._vocab)

    def _tokenize(self, text):
        # Games are whitespace-separated move strings; one move == one token.
        return [normalize_move(t) for t in text.strip().split()]

    def _convert_token_to_id(self, token):
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index):
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens):
        specials = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in specials)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        os.makedirs(save_directory, exist_ok=True)
        path = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(path, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, indent=2)
        return (path,)

    @classmethod
    def build_vocab_from_dataset(cls, dataset_name, min_frequency=2, max_vocab_size=1200, **kwargs):
        """Build a compact, dense vocabulary from a streamed sample of the dataset."""
        from collections import Counter

        from datasets import load_dataset

        # Stream a bounded sample instead of downloading the full dataset.
        ds = load_dataset(dataset_name, split="train", streaming=True)
        ds = ds.take(50000)

        counter = Counter()
        for ex in ds:
            moves = [normalize_move(t) for t in ex["text"].split()]
            counter.update(moves)

        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        # Keep the most frequent moves, dropping any rarer than min_frequency.
        kept = [
            t
            for t, c in counter.most_common(max_vocab_size - len(special))
            if c >= min_frequency
        ]
        vocab = {t: i for i, t in enumerate(special + kept)}
        return cls(vocab=vocab, **kwargs)
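

# Minimal smoke test (an illustrative sketch, not part of the tokenizer API;
# the exact token IDs depend on vocab insertion order).
if __name__ == "__main__":
    moves = ["WPe2e4", "BPe7e5", "WNg1f3", "BNb8c6"]
    specials = [
        ChessTokenizer.PAD_TOKEN,
        ChessTokenizer.BOS_TOKEN,
        ChessTokenizer.EOS_TOKEN,
        ChessTokenizer.UNK_TOKEN,
    ]
    tokenizer = ChessTokenizer(vocab={t: i for i, t in enumerate(specials + moves)})
    ids = tokenizer("WPe2e4(x) BPe7e5")["input_ids"]
    print(ids)                    # suffixes are stripped by normalize_move
    print(tokenizer.decode(ids))  # -> "WPe2e4 BPe7e5"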