# chess_MaximeMuh/tokenizer.py
"""
Custom Chess Tokenizer - Final Fix
"""
from __future__ import annotations
import json
import os
import shutil
import re
from transformers import PreTrainedTokenizer
# "e2e4"-style from/to squares, and "=Q"-style promotion suffixes.
MOVE_RE = re.compile(r"([a-h][1-8])([a-h][1-8])")
PROMO_RE = re.compile(r"=([NBRQ])")


def normalize_move(tok: str) -> str:
    """Reduce a move token to piece prefix + from/to squares + optional promotion."""
    if tok.startswith("["):  # special tokens like [BOS] pass through untouched
        return tok
    m = MOVE_RE.search(tok)
    if not m:
        return tok
    fr, to = m.group(1), m.group(2)
    promo = ""
    pm = PROMO_RE.search(tok)
    if pm:
        promo = "=" + pm.group(1)
    prefix = tok[:2] if len(tok) >= 2 else "WP"
    return f"{prefix}{fr}{to}{promo}"
class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    def __init__(self, vocab_file=None, vocab=None, **kwargs):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        # Drop caller-supplied special tokens so they cannot clash with ours.
        for t in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(t, None)
        if vocab is None:
            if vocab_file is None:
                vocab_file = os.path.join(os.path.dirname(__file__), "vocab.json")
            self.vocab_file = vocab_file
            if os.path.exists(vocab_file):
                with open(vocab_file, "r", encoding="utf-8") as f:
                    self._vocab = json.load(f)
            else:
                self._vocab = self._create_default_vocab()
        else:
            self._vocab = vocab
            self.vocab_file = vocab_file
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
        # The vocab must exist before super().__init__, which registers the special tokens.
        super().__init__(
            pad_token=self.PAD_TOKEN,
            bos_token=self.BOS_TOKEN,
            eos_token=self.EOS_TOKEN,
            unk_token=self.UNK_TOKEN,
            **kwargs,
        )
    def save_pretrained(self, save_directory: str, **kwargs):
        super().save_pretrained(save_directory, **kwargs)
        # Ship this module alongside the saved files so the tokenizer is self-contained.
        src_path = os.path.abspath(__file__)
        dst_path = os.path.join(save_directory, "tokenizer.py")
        if src_path != dst_path:
            shutil.copy(src_path, dst_path)
        # Point AutoTokenizer at the copied module so remote-code loading works.
        config_path = os.path.join(save_directory, "tokenizer_config.json")
        if os.path.exists(config_path):
            with open(config_path, "r") as f:
                cfg = json.load(f)
            cfg["auto_map"] = {"AutoTokenizer": "tokenizer.ChessTokenizer"}
            with open(config_path, "w") as f:
                json.dump(cfg, f, indent=2)
    def _create_default_vocab(self):
        specials = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {t: i for i, t in enumerate(specials)}
    @classmethod
    def build_vocab_from_dataset(cls, dataset_name, split="train", column="text",
                                 min_frequency=2, max_vocab_size=1700, max_samples=100000):
        from collections import Counter

        from datasets import load_dataset

        ds = load_dataset(dataset_name, split=split, streaming=True)
        ds = ds.take(max_samples)
        counter = Counter()
        for ex in ds:
            # Normalize each move before counting so annotated variants collapse
            # to a single token.
            moves = [normalize_move(t) for t in ex[column].split()]
            counter.update(moves)
        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        # Keep only tokens seen at least min_frequency times, most frequent first.
        kept = [t for t, c in counter.most_common(max_vocab_size - len(special))
                if c >= min_frequency]
        vocab = {t: i for i, t in enumerate(special + kept)}
        return cls(vocab=vocab)
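    # Example usage (hedged; "user/chess-games" is a placeholder dataset id, not
    # a real reference from this repo):
    #   tok = ChessTokenizer.build_vocab_from_dataset("user/chess-games", column="text")
    #   tok.save_pretrained("./chess_tokenizer")
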
    @property
    def vocab_size(self):
        return len(self._vocab)

    def get_vocab(self):
        return dict(self._vocab)

    def _tokenize(self, text):
        return [normalize_move(t) for t in text.strip().split()]

    def _convert_token_to_id(self, token):
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index):
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens):
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        os.makedirs(save_directory, exist_ok=True)
        prefix = (filename_prefix + "-") if filename_prefix else ""
        path = os.path.join(save_directory, prefix + "vocab.json")
        with open(path, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (path,)
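

# Minimal smoke test (a hedged sketch, not part of the submission's training
# pipeline): builds the tokenizer from a tiny inline vocab so it runs without
# a vocab.json on disk.
if __name__ == "__main__":
    vocab = {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2, "[UNK]": 3, "WPe2e4": 4, "BPe7e5": 5}
    tok = ChessTokenizer(vocab=vocab)
    ids = tok("WPe2e4 BPe7e5")["input_ids"]
    print(ids)  # expected: [4, 5]
    print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))  # "WPe2e4 BPe7e5"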