Yentlcol committed on
Commit
50068c7
·
verified ·
1 Parent(s): d9b9265

Chess Challenge submission by Yentlcol

Browse files
Files changed (2) hide show
  1. component_tokenizer.py +338 -0
  2. tokenizer_config.json +2 -2
component_tokenizer.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Component-based Chess Tokenizer - Optimized for Parameter Efficiency.
3
+
4
+ This tokenizer decomposes chess moves into reusable components:
5
+ - Piece type (P, N, B, R, Q, K)
6
+ - Source square (a1-h8)
7
+ - Destination square (a1-h8)
8
+ - Modifiers (capture, check, castling, etc.)
9
+
10
+ Example:
11
+ "WPe2e4" → ["P", "e2", "e4"]
12
+ "BNg8f6(x)" → ["N", "g8", "f6", "(x)"]
13
+
14
+ This reduces vocabulary from ~1682 to ~80 tokens, saving 205K parameters.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import os
21
+ from typing import Dict, List, Optional
22
+
23
+ from transformers import PreTrainedTokenizer
24
+
25
+
26
class ComponentChessTokenizer(PreTrainedTokenizer):
    """
    Component-based tokenizer for chess moves.

    Decomposes moves into: [piece, from_square, to_square, modifiers...]

    Key advantages:
    - 95% smaller vocabulary (1682 → 80 tokens)
    - Saves 205K embedding parameters
    - Better generalization to rare move combinations
    - Compositional understanding of chess structure
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    SEP_TOKEN = "[SEP]"  # Separates components within a move

    # Chess piece types (6 tokens)
    PIECES = ["P", "N", "B", "R", "Q", "K"]

    # All squares on the board (64 tokens)
    FILES = "abcdefgh"
    RANKS = "12345678"

    # All 64 squares in vocabulary order (file-major: a1..a8, b1..b8, ...).
    # The string literals are repeated here because a class-body comprehension
    # cannot reference other class attributes (FILES/RANKS) in its inner loop.
    SQUARES = tuple(f + r for f in "abcdefgh" for r in "12345678")

    # Move modifiers (10 tokens)
    MODIFIERS = [
        "(x)",  # capture
        "(+)",  # check
        "(+*)",  # checkmate
        "(o)",  # kingside castling
        "(O)",  # queenside castling
        "=Q",  # promotion to queen
        "=R",  # promotion to rook
        "=B",  # promotion to bishop
        "=N",  # promotion to knight
        "(e.p.)",  # en passant
    ]

    # Derived lookup structures, built once at class-creation time:
    # frozensets give O(1) membership tests instead of repeated list scans,
    # and longest-first ordering guarantees greedy modifier matching stays
    # correct even if a modifier is ever a prefix of another.
    _SQUARE_SET = frozenset(SQUARES)
    _PIECE_SET = frozenset(PIECES)
    _MODIFIER_SET = frozenset(MODIFIERS)
    _MODIFIERS_LONGEST_FIRST = sorted(MODIFIERS, key=len, reverse=True)

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the component chess tokenizer.

        Args:
            vocab_file: Optional path to a JSON vocabulary file.
            vocab: Optional pre-built token→id mapping; takes precedence
                over ``vocab_file``.
            **kwargs: Forwarded to ``PreTrainedTokenizer.__init__``.
        """
        # Initialize special tokens before super().__init__, which may
        # tokenize during setup and therefore needs the vocab in place.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Remove duplicate special-token entries so our canonical values win.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or create vocabulary (explicit dict > file on disk > default).
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_component_vocab()

        # Create reverse mapping (id → token) for decoding.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Call parent init
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_component_vocab(self) -> Dict[str, int]:
        """
        Create the component vocabulary.

        Vocabulary structure:
        - Special tokens (5): [PAD], [BOS], [EOS], [UNK], [SEP]
        - Pieces (6): P, N, B, R, Q, K
        - Squares (64): a1, a2, ..., h8
        - Modifiers (10): (x), (+), (+*), (o), (O), =Q, =R, =B, =N, (e.p.)

        Total: 85 tokens (vs 1682 in original tokenizer)
        """
        tokens = [
            self.PAD_TOKEN,
            self.BOS_TOKEN,
            self.EOS_TOKEN,
            self.UNK_TOKEN,
            self.SEP_TOKEN,
        ]

        # Add pieces
        tokens.extend(self.PIECES)

        # Add all squares (precomputed, file-major order)
        tokens.extend(self.SQUARES)

        # Add modifiers
        tokens.extend(self.MODIFIERS)

        # Assign ids in insertion order.
        return {token: idx for idx, token in enumerate(tokens)}

    @classmethod
    def build_vocab(cls) -> "ComponentChessTokenizer":
        """
        Build tokenizer with component vocabulary.

        No dataset needed - vocabulary is deterministic based on chess rules.
        """
        return cls()

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary (defensive copy)."""
        return dict(self._vocab)

    def _decompose_move(self, move: str) -> List[str]:
        """
        Decompose a move string into components.

        Examples:
            "WPe2e4" → ["P", "e2", "e4"]
            "BNg8f6(x)" → ["N", "g8", "f6", "(x)"]
            "WKe1g1(o)" → ["K", "e1", "g1", "(o)"]
            "BPe7e8=Q(+)" → ["P", "e7", "e8", "=Q", "(+)"]

        Args:
            move: Extended UCI move string (e.g., "WPe2e4")

        Returns:
            List of component tokens; ``[UNK]`` if the move is malformed.
        """
        # Empty input is malformed — map to UNK rather than emitting an
        # empty pseudo-token that is not in the vocabulary.
        if not move:
            return [self.UNK_TOKEN]
        # Special tokens pass through unchanged.
        if move in (self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN):
            return [move]

        components = []

        # Remove color prefix (W/B); color is redundant (inferable from ply).
        if move.startswith(('W', 'B')):
            move = move[1:]

        if not move:
            return [self.UNK_TOKEN]

        # Extract piece type
        piece = move[0]
        if piece not in self._PIECE_SET:
            # Invalid piece
            return [self.UNK_TOKEN]
        components.append(piece)
        move = move[1:]

        # Extract squares (from and to)
        # Format: <piece><from_square><to_square>[modifiers]
        # E.g., "Pe2e4", "Ng1f3(x)", "Ke1g1(o)"
        if len(move) < 4:
            # Not enough characters for two squares
            return [self.UNK_TOKEN]

        # Extract from_square (2 chars)
        from_square = move[0:2]
        if from_square not in self._SQUARE_SET:
            return [self.UNK_TOKEN]
        components.append(from_square)

        # Extract to_square (2 chars)
        to_square = move[2:4]
        if to_square not in self._SQUARE_SET:
            return [self.UNK_TOKEN]
        components.append(to_square)

        # Extract modifiers (remaining characters), greedy longest-first so
        # a modifier that is a prefix of another can never shadow it.
        remaining = move[4:]
        i = 0
        while i < len(remaining):
            for modifier in self._MODIFIERS_LONGEST_FIRST:
                if remaining.startswith(modifier, i):
                    components.append(modifier)
                    i += len(modifier)
                    break
            else:
                # Unknown character, skip it
                i += 1

        return components

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into component tokens.

        Args:
            text: Space-separated moves (e.g., "WPe2e4 BPe7e5 WNg1f3")

        Returns:
            List of component tokens
        """
        tokens: List[str] = []
        for move in text.strip().split():
            # Special tokens pass through; everything else is decomposed.
            if move in (self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN):
                tokens.append(move)
            else:
                tokens.extend(self._decompose_move(move))
        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID (UNK id — or 0 — for unknown tokens)."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token (UNK for out-of-range ids)."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert component tokens back to move strings.

        This reconstructs moves from components.
        Note: We lose the W/B color prefix, but it's redundant
        (can be inferred from move position).
        """
        # Filter out special tokens
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SEP_TOKEN}
        tokens = [t for t in tokens if t not in special]

        # Reconstruct moves from components
        moves = []
        i = 0
        while i < len(tokens):
            # Expect: piece, from_square, to_square, [modifiers...]
            if i + 2 >= len(tokens):
                break

            piece = tokens[i]
            from_sq = tokens[i + 1]
            to_sq = tokens[i + 2]

            if (
                piece in self._PIECE_SET
                and from_sq in self._SQUARE_SET
                and to_sq in self._SQUARE_SET
            ):
                move = f"{piece}{from_sq}{to_sq}"
                i += 3

                # Collect modifiers
                while i < len(tokens) and tokens[i] in self._MODIFIER_SET:
                    move += tokens[i]
                    i += 1

                moves.append(move)
            else:
                # Skip invalid tokens
                i += 1

        return " ".join(moves)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to write into (created if missing).
            filename_prefix: Optional prefix for the vocab filename.

        Returns:
            One-tuple containing the path of the written vocab file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
tokenizer_config.json CHANGED
@@ -35,7 +35,7 @@
35
  },
36
  "auto_map": {
37
  "AutoTokenizer": [
38
- "tokenizer.ChessTokenizer",
39
  null
40
  ]
41
  },
@@ -45,6 +45,6 @@
45
  "extra_special_tokens": {},
46
  "model_max_length": 1000000000000000019884624838656,
47
  "pad_token": "[PAD]",
48
- "tokenizer_class": "ChessTokenizer",
49
  "unk_token": "[UNK]"
50
  }
 
35
  },
36
  "auto_map": {
37
  "AutoTokenizer": [
38
+ "component_tokenizer.ComponentChessTokenizer",
39
  null
40
  ]
41
  },
 
45
  "extra_special_tokens": {},
46
  "model_max_length": 1000000000000000019884624838656,
47
  "pad_token": "[PAD]",
48
+ "tokenizer_class": "ComponentChessTokenizer",
49
  "unk_token": "[UNK]"
50
  }