"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer treats each move as a single token using the extended UCI notation
from the Lichess dataset (e.g., WPe2e4, BNg8f6).

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""

from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer

# Regexes to extract the source square, destination square, and promotion piece
MOVE_RE = re.compile(r"([a-h][1-8])([a-h][1-8])")
PROMO_RE = re.compile(r"=([NBRQ])")

def normalize_move(tok: str) -> str:
    """Transforme 'WPe2e4(x)' en 'WPe2e4' pour réduire le vocabulaire."""
    # 1. Garder les infos de base
    m = MOVE_RE.search(tok)
    if not m:
        return tok # Fallback (sera probablement UNK)
    
    fr, to = m.group(1), m.group(2)
    
    # 2. Gérer la promotion
    promo = ""
    pm = PROMO_RE.search(tok)
    if pm:
        promo = "=" + pm.group(1)
        
    # 3. Reconstruire le token standardisé
    # On garde le préfixe WP/BN (chars 0 et 1) pour garder l'info couleur/pièce
    # mais on supprime les suffixes (x), (+), etc.
    prefix = tok[:2] if len(tok) >= 2 else "WP"
    return f"{prefix}{fr}{to}{promo}"

class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    
    def __init__(self, vocab_file=None, vocab=None, **kwargs):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        
        # Drop special-token kwargs so they are not passed twice to the parent constructor
        for t in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(t, None)
            
        if vocab:
            self._vocab = vocab
        elif vocab_file:
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = {t: i for i, t in enumerate([self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])}
            
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
        super().__init__(pad_token=self.PAD_TOKEN, bos_token=self.BOS_TOKEN, eos_token=self.EOS_TOKEN, unk_token=self.UNK_TOKEN, **kwargs)

    @property
    def vocab_size(self):
        return len(self._vocab)

    def get_vocab(self):
        return dict(self._vocab)

    def _tokenize(self, text):
        # This is where the normalization happens: each move is normalized on the fly
        return [normalize_move(t) for t in text.strip().split()]

    def _convert_token_to_id(self, token):
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index):
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens):
        return " ".join(t for t in tokens if t not in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])
    
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.exists(save_directory):
            os.makedirs(save_directory)
        path = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
        with open(path, "w") as f:
            json.dump(self._vocab, f, indent=2)
        return (path,)

    @classmethod
    def build_vocab_from_dataset(cls, dataset_name, min_frequency=2, max_vocab_size=1200, **kwargs):
        """Construit un vocabulaire compact et dense."""
        from datasets import load_dataset
        from collections import Counter
        
        # Load in streaming mode to avoid downloading the full dataset
        ds = load_dataset(dataset_name, split="train", streaming=True)
        ds = ds.take(50000)  # 50k games are enough to cover virtually every possible move
        
        counter = Counter()
        for ex in ds:
            # Normalize before counting!
            moves = [normalize_move(t) for t in ex["text"].split()]
            counter.update(moves)
            
        # Keep the special tokens plus the N most frequent moves
        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        most_common = counter.most_common(max_vocab_size - len(special))
        
        vocab = {t: i for i, t in enumerate(special + [t for t, c in most_common])}
        return cls(vocab=vocab)
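

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). "Lichess/chess-games" is a
    # placeholder dataset name; substitute the actual challenge dataset.
    tokenizer = ChessTokenizer.build_vocab_from_dataset("Lichess/chess-games")
    enc = tokenizer("WPe2e4 BPe7e5 WNg1f3 BNb8c6")
    print(enc["input_ids"])
    print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
    # Persist the vocabulary for later reuse
    tokenizer.save_vocabulary("./chess_tokenizer")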