"""
Custom Chess Tokenizer for the Chess Challenge.
"""

from __future__ import annotations
import re
import json
import os
from typing import Dict, List, Optional
from transformers import PreTrainedTokenizer

class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}
    
    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    
    def __init__(self, vocab_file: Optional[str] = None, vocab: Optional[Dict[str, int]] = None, **kwargs):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)
        
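        # _vocab must be populated before super().__init__: in recent
        # transformers versions the base class resolves the special tokens
        # through the vocab during initialization.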
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()
        
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
        
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )
    
    def _create_default_vocab(self) -> Dict[str, int]:
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {token: idx for idx, token in enumerate(special_tokens)}
    
    @classmethod
    def build_vocab_from_iterator(cls, iterator, min_frequency: int = 1):
        from collections import Counter
        token_counts = Counter()
        
        for game in iterator:
            # 1. Cleanup: strip parenthesized suffixes
            game = re.sub(r'\(.*?\)', '', game)
            moves = game.strip().split()
            
            for i, move in enumerate(moves):
                # 2. Square-aware logic: squares (e2) or single letters (W)
                tokens = re.findall(r'[a-h][1-8]|.', move)
                token_counts.update(tokens)
                # 3. Explicitly add the space token between moves
                if i < len(moves) - 1:
                    token_counts.update([" "])
        
        tokens = sorted([t for t, c in token_counts.items() if c >= min_frequency])
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
        return cls(vocab=vocab)
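
    # Usage sketch (illustrative in-memory games rather than the dataset):
    #   games = ["1. e4 e5 2. Nf3 Nc6", "1. d4 d5 2. c4 e6"]
    #   tok = ChessTokenizer.build_vocab_from_iterator(games)
    #   tok.tokenize("1. e4")  # -> ["1", ".", " ", "e4"]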

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 1,
        max_samples: Optional[int] = 50000,
    ):
        from datasets import load_dataset
        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))
        
        def game_iterator():
            for example in dataset:
                yield example[column]
        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
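
    # e.g. ChessTokenizer.build_vocab_from_dataset(max_samples=10_000)
    # downloads the dataset from the Hugging Face Hub on first use.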
    
    @property
    def vocab_size(self) -> int:
        return len(self._vocab)
    
    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        # 1. Cleanup: strip parenthesized suffixes
        text = re.sub(r'\(.*?\)', '', text)
        moves = text.strip().split()
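        # e.g. "1. e4 e5" -> moves == ["1.", "e4", "e5"]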
        
        all_tokens = []
        for i, move in enumerate(moves):
            # 2. Square-aware regex: squares (e.g. e2) or single characters
            tokens = re.findall(r'[a-h][1-8]|.', move)
            all_tokens.extend(tokens)
            # 3. Space token between moves
            if i < len(moves) - 1:
                all_tokens.append(" ")
        return all_tokens
    
    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
    
    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)
    
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        filtered_tokens = [t for t in tokens if t not in special]
        # Join with "" because the space " " is already a token in the list
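        # e.g. ["1", ".", " ", "e4"] -> "1. e4"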
        return "".join(filtered_tokens)
    
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
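
    # Round-trip sketch (hypothetical directory name):
    #   paths = tok.save_vocabulary("./chess_tokenizer")  # ("./chess_tokenizer/vocab.json",)
    #   tok2 = ChessTokenizer(vocab_file=paths[0])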

# Utility function for counting token frequencies in a dataset
def count_vocab_from_dataset(dataset_name="dlouapre/lichess_2025-01_1M", split="train", column="text", max_samples=10000):
    from collections import Counter
    from datasets import load_dataset
    dataset = load_dataset(dataset_name, split=split)
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
    token_counts = Counter()
    for example in dataset:
        text = re.sub(r'\(.*?\)', '', example[column])
        moves = text.strip().split()
        for i, move in enumerate(moves):
            tokens = re.findall(r'[a-h][1-8]|.', move)
            token_counts.update(tokens)
            # Count the separator space only between moves, matching _tokenize
            if i < len(moves) - 1:
                token_counts.update([" "])
    return dict(token_counts)
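

if __name__ == "__main__":
    # Minimal smoke test on made-up game strings (not drawn from the
    # dlouapre/lichess_2025-01_1M dataset); assumes a recent transformers
    # version with the standard slow-tokenizer API.
    games = [
        "1. e4 e5 2. Nf3 Nc6 3. Bb5 a6",
        "1. d4 d5 2. c4 e6 3. Nc3 Nf6",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(games)
    print("vocab size:", tokenizer.vocab_size)

    text = games[0]
    tokens = tokenizer.tokenize(text)
    print("tokens:", tokens)
    print("ids:", tokenizer(text)["input_ids"])
    # The space " " is itself a token, so joining with "" restores the text.
    assert tokenizer.convert_tokens_to_string(tokens) == text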