"""

Custom Chess Tokenizer for the Chess Challenge.



This tokenizer treats each move as a single token using the extended UCI notation

from the Lichess dataset (e.g., WPe2e4, BNg8f6).



We normalize moves by stripping special suffixes:

- (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling



Example normalization:

  WBb5c6(x)  -> WBb5c6

  BPd7c6(x)  -> BPd7c6

  ... (x)(+) -> ...   (both removed)

"""

from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Suffixes to strip from move tokens
    _SPECIAL_SUFFIXES = ("(x)", "(+*)", "(+)", "(o)", "(O)")

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Avoid duplicates passed through kwargs
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {token: idx for idx, token in enumerate(special_tokens)}

    @classmethod
    def _normalize_move(cls, move: str) -> str:
        """

        Strip known special suffixes from the end of a move token.

        Handles stacked suffixes like "...(x)(+)" by stripping repeatedly.

        """
        move = move.strip()
        # Repeatedly remove known suffixes if they appear at the end
        changed = True
        while changed:
            changed = False
            for suf in cls._SPECIAL_SUFFIXES:
                if move.endswith(suf):
                    move = move[: -len(suf)]
                    changed = True
        return move
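
    # Example behavior (matches the module docstring):
    #   _normalize_move("WBb5c6(x)")    -> "WBb5c6"
    #   _normalize_move("BPd7c6(x)(+)") -> "BPd7c6"   (stacked suffixes removed)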

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            raw_moves = game.strip().split()
            moves = [cls._normalize_move(m) for m in raw_moves if m.strip()]
            token_counts.update(moves)

        tokens = [token for token, count in token_counts.items() if count >= min_frequency]
        tokens = sorted(tokens)

        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
        return cls(vocab=vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        raw_moves = text.strip().split()
        return [self._normalize_move(m) for m in raw_moves if m.strip()]

    def _convert_token_to_id(self, token: str) -> int:
        # Extra safety: normalize here too, in case someone passes raw "(x)" tokens.
        token = self._normalize_move(token)
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        raw_moves = example[column].strip().split()
        moves = [ChessTokenizer._normalize_move(m) for m in raw_moves if m.strip()]
        token_counts.update(moves)

    return dict(token_counts)
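

# Minimal usage sketch (illustrative, not part of the original module): it builds a
# vocabulary from a tiny in-memory corpus instead of the Lichess dataset, then
# exercises the encode and decode paths, including suffix normalization.
if __name__ == "__main__":
    sample_games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6 WBf1b5 BPa7a6 WBb5c6(x) BPd7c6(x)",
        "WPd2d4 BPd7d5 WPc2c4 BPe7e6",
    ]
    tokenizer = ChessTokenizer.build_vocab_from_iterator(sample_games, min_frequency=1)

    # "(x)" suffixes are stripped during tokenization, so the encoded ids map to the
    # normalized vocabulary entries (WBb5c6, BPd7c6).
    encoded = tokenizer("WPe2e4 BPe7e5 WBb5c6(x) BPd7c6(x)")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))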