thandre10 commited on
Commit
93b803b
·
verified ·
1 Parent(s): 4aa73d3

Chess Challenge submission by thandre10

Browse files
Files changed (7) hide show
  1. README.md +26 -0
  2. config.json +20 -0
  3. model.safetensors +3 -0
  4. special_tokens_map.json +6 -0
  5. tokenizer.py +412 -0
  6. tokenizer_config.json +50 -0
  7. vocab.json +85 -0
README.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ tags:
4
+ - chess
5
+ - llm-course
6
+ - chess-challenge
7
+ license: mit
8
+ ---
9
+
10
+ # chess_thandre10_v3
11
+
12
+ Chess model submitted to the LLM Course Chess Challenge.
13
+
14
+ ## Submission Info
15
+
16
+ - **Submitted by**: [thandre10](https://huggingface.co/thandre10)
17
+ - **Parameters**: 738,360
18
+ - **Organization**: LLM-course
19
+
20
+ ## Model Details
21
+
22
+ - **Architecture**: Chess Transformer (GPT-style)
23
+ - **Vocab size**: 83
24
+ - **Embedding dim**: 120
25
+ - **Layers**: 4
26
+ - **Heads**: 4
config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ChessForCausalLM"
4
+ ],
5
+ "bos_token_id": 1,
6
+ "dropout": 0.1,
7
+ "dtype": "float32",
8
+ "eos_token_id": 2,
9
+ "layer_norm_epsilon": 1e-05,
10
+ "model_type": "chess_transformer",
11
+ "n_ctx": 256,
12
+ "n_embd": 120,
13
+ "n_head": 4,
14
+ "n_inner": 480,
15
+ "n_layer": 4,
16
+ "pad_token_id": 0,
17
+ "tie_weights": true,
18
+ "transformers_version": "4.57.6",
19
+ "vocab_size": 83
20
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8de0af4b0f13d2a81c8fa50f28bd6718b3d44f685324ece2ef2e70cfa80bd521
3
+ size 2957840
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[BOS]",
3
+ "eos_token": "[EOS]",
4
+ "pad_token": "[PAD]",
5
+ "unk_token": "[UNK]"
6
+ }
tokenizer.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Custom Chess Tokenizer for the Chess Challenge.
3
+
4
+ This tokenizer uses sub-structural tokenization: each move is decomposed into
5
+ its components (piece, source square, destination square, suffix) instead of
6
+ treating the whole move as a single token.
7
+
8
+ Example: WPe2e4 -> [P, e2, e4] (color is implicit from move number)
9
+ BNg8f6(x) -> [N, g8, f6, (x)]
10
+
11
+ This approach:
12
+ - Reduces vocabulary from ~1200 to ~80 tokens
13
+ - Enables generalization across similar moves
14
+ - Eliminates [UNK] tokens for rare moves
15
+ - Saves parameters in the embedding layer
16
+
17
+ The dataset format uses:
18
+ - W/B prefix for White/Black (ignored - implicit from position)
19
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
20
+ - Source and destination squares (e.g., e2e4)
21
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
22
+ """
23
+
24
+ from __future__ import annotations
25
+
26
+ import json
27
+ import os
28
+ import re
29
+ from pathlib import Path
30
+ from typing import Dict, List, Optional, Tuple
31
+
32
+ from transformers import PreTrainedTokenizer
33
+
34
+
35
# Pattern for one move in the dataset's extended UCI notation:
#   <color W|B> <piece PNBRQK> <src file a-h> <src rank 1-8> <dst file> <dst rank> [<suffix>]
# The optional trailing group captures a parenthesised marker such as (x) or (+).
MOVE_PATTERN = re.compile(r'^([WB])([PNBRQK])([a-h])([1-8])([a-h])([1-8])(\([^)]+\))?$')
40
+
41
+
42
class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using sub-structural tokenization.

    Each move is decomposed into components:
    - Piece type (P, N, B, R, Q, K)
    - Source square (e2, d7, etc.)
    - Destination square (e4, f6, etc.)
    - Optional suffix for captures/checks ((x), (+), (+*), (o), (O))
    - Optional promotion marker (=Q, =R, =B, =N)

    The color (W/B) is NOT tokenized as it's implicit from the move order.

    Example:
        "WPe2e4 BPe7e5" is decomposed into the sub-tokens
        ['P', 'e2', 'e4', 'P', 'e7', 'e5'], i.e. ids [4, 43, 45, 4, 48, 46]
        under the default vocabulary.
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Promotion moves look like WPe7e8Q or WPe7e8Q(+).  Compiled once at class
    # definition time so _parse_move avoids a per-call re.match() pattern lookup.
    _PROMO_PATTERN = re.compile(
        r'^([WB])P([a-h])([1-8])([a-h])([1-8])([QRBN])(\([^)]+\))?$'
    )

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (takes precedence over
                vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        # Initialize special tokens
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Remove any duplicate special-token entries passed through kwargs
        # to avoid "multiple values for keyword" errors when loading from disk.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Fall back to the deterministic sub-structural vocabulary that
            # covers every possible move component.
            self._vocab = self._create_default_vocab()

        # Create reverse mapping for id -> token lookups
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Call parent init AFTER setting up vocab: the parent constructor
        # resolves the special tokens against this tokenizer's vocabulary.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create the full sub-structural vocabulary.

        The vocabulary contains:
        - 4 special tokens: [PAD], [BOS], [EOS], [UNK]
        - 6 piece tokens: P, N, B, R, Q, K
        - 64 square tokens: a1, a2, ..., h8
        - 5 suffix tokens: (x), (+), (+*), (o), (O)
        - 4 promotion tokens: =Q, =R, =B, =N

        Total: 83 tokens (vs ~1200 for move-level tokenization)
        """
        # Special tokens first so they get the conventional low ids 0-3.
        tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        # Piece tokens
        tokens.extend(['P', 'N', 'B', 'R', 'Q', 'K'])

        # Square tokens (a1-h8), file-major order to match vocab.json
        for f in 'abcdefgh':
            for r in '12345678':
                tokens.append(f + r)

        # Suffix tokens for special moves (capture, check, mate, castling)
        tokens.extend(['(x)', '(+)', '(+*)', '(o)', '(O)'])

        # Promotion tokens (pawn promotion to piece); the dataset encodes
        # promotions like WPe7e8Q, which _parse_move turns into '=Q'.
        tokens.extend(['=Q', '=R', '=B', '=N'])

        return {token: idx for idx, token in enumerate(tokens)}

    @classmethod
    def build_vocab(cls) -> "ChessTokenizer":
        """
        Build a tokenizer with the pre-defined sub-structural vocabulary.

        This is the recommended way to create a tokenizer for the chess
        challenge.  The vocabulary is deterministic and covers all possible
        moves.

        Returns:
            A ChessTokenizer with the full sub-structural vocabulary
            (83 tokens).
        """
        return cls()

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Note: With sub-structural tokenization, this method is mainly useful
        for analyzing token frequencies.  The default vocabulary already
        covers all possible moves, so both arguments are currently ignored.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the default (full) vocabulary.
        """
        # With sub-structural tokenization, the default vocab already
        # contains every possible sub-token.
        return cls()

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Note: With sub-structural tokenization, the vocabulary is pre-defined
        and doesn't need to be built from data.  This method is kept for
        compatibility but simply returns a tokenizer with the default vocab;
        all arguments are currently ignored.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included.
            max_samples: Maximum number of samples to process.

        Returns:
            A ChessTokenizer with the full sub-structural vocabulary.
        """
        # With sub-structural tokenization, we don't need to scan the dataset.
        return cls()

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return a copy of the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _parse_move(self, move: str) -> List[str]:
        """
        Parse a single move into its sub-components.

        Args:
            move: A move in extended UCI notation (e.g., WPe2e4, BNg8f6(x))

        Returns:
            List of tokens: [piece, src_square, dst_square, suffix?] for a
            standard move, or ['P', src, dst, '=piece', suffix?] for a
            promotion.  Color (W/B) is ignored as it's implicit from move
            order.
        """
        # Standard move: WPe2e4, BNg8f6(x), ...
        match = MOVE_PATTERN.match(move)
        if match:
            _color, piece, src_file, src_rank, dst_file, dst_rank, suffix = match.groups()
            tokens = [piece, src_file + src_rank, dst_file + dst_rank]
            if suffix:
                tokens.append(suffix)
            return tokens

        # Promotion: WPe7e8Q or WPe7e8Q(+)
        promo = self._PROMO_PATTERN.match(move)
        if promo:
            _color, src_file, src_rank, dst_file, dst_rank, promo_piece, suffix = promo.groups()
            tokens = ['P', src_file + src_rank, dst_file + dst_rank, '=' + promo_piece]
            if suffix:
                tokens.append(suffix)
            return tokens

        # Fallback: return as a single token (will map to [UNK] on encode)
        return [move]

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into sub-structural tokens.

        Each move is decomposed into:
        - Piece type (P, N, B, R, Q, K)
        - Source square (e2, d7, etc.)
        - Destination square (e4, f6, etc.)
        - Optional suffix ((x), (+), etc.)

        Args:
            text: A string of space-separated moves.

        Returns:
            List of sub-tokens.

        Example:
            "WPe2e4 BPe7e5" -> ['P', 'e2', 'e4', 'P', 'e7', 'e5']
        """
        tokens = []
        for move in text.strip().split():
            tokens.extend(self._parse_move(move))
        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID, falling back to the [UNK] id."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token, falling back to [UNK]."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert a list of sub-tokens back to a string of moves.

        A piece token (P, N, B, R, Q, K) starts a new move; every other
        non-special token (square, suffix, promotion) extends the move in
        progress.  Special tokens are skipped.  Note that the W/B color
        prefix is never tokenized, so it is not reconstructed here.

        Args:
            tokens: List of sub-tokens.

        Returns:
            Space-separated string of reconstructed moves.
        """
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        pieces = {'P', 'N', 'B', 'R', 'Q', 'K'}

        moves = []
        current_move = []

        for token in tokens:
            if token in special:
                continue
            if token in pieces:
                # Start of a new move - flush the previous one if it exists.
                if current_move:
                    moves.append(''.join(current_move))
                current_move = [token]
            else:
                # Square, suffix, or promotion token: extend the current move.
                current_move.append(token)

        # Don't forget the last move
        if current_move:
            moves.append(''.join(current_move))

        return " ".join(moves)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary (created if it
                does not exist).
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        # exist_ok makes a separate isdir() check unnecessary.
        os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
376
+
377
+
378
def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count sub-token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping sub-tokens to their frequencies.
    """
    from collections import Counter
    from datasets import load_dataset

    ds = load_dataset(dataset_name, split=split)

    # Cap the number of rows scanned, guarding against short datasets.
    if max_samples is not None:
        ds = ds.select(range(min(max_samples, len(ds))))

    # A default tokenizer instance provides the move -> sub-token parser.
    parser = ChessTokenizer()
    frequencies = Counter()

    for row in ds:
        frequencies.update(parser._tokenize(row[column]))

    return dict(frequencies)
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[BOS]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[EOS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[UNK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ }
35
+ },
36
+ "auto_map": {
37
+ "AutoTokenizer": [
38
+ "tokenizer.ChessTokenizer",
39
+ null
40
+ ]
41
+ },
42
+ "bos_token": "[BOS]",
43
+ "clean_up_tokenization_spaces": false,
44
+ "eos_token": "[EOS]",
45
+ "extra_special_tokens": {},
46
+ "model_max_length": 1000000000000000019884624838656,
47
+ "pad_token": "[PAD]",
48
+ "tokenizer_class": "ChessTokenizer",
49
+ "unk_token": "[UNK]"
50
+ }
vocab.json ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "[PAD]": 0,
3
+ "[BOS]": 1,
4
+ "[EOS]": 2,
5
+ "[UNK]": 3,
6
+ "P": 4,
7
+ "N": 5,
8
+ "B": 6,
9
+ "R": 7,
10
+ "Q": 8,
11
+ "K": 9,
12
+ "a1": 10,
13
+ "a2": 11,
14
+ "a3": 12,
15
+ "a4": 13,
16
+ "a5": 14,
17
+ "a6": 15,
18
+ "a7": 16,
19
+ "a8": 17,
20
+ "b1": 18,
21
+ "b2": 19,
22
+ "b3": 20,
23
+ "b4": 21,
24
+ "b5": 22,
25
+ "b6": 23,
26
+ "b7": 24,
27
+ "b8": 25,
28
+ "c1": 26,
29
+ "c2": 27,
30
+ "c3": 28,
31
+ "c4": 29,
32
+ "c5": 30,
33
+ "c6": 31,
34
+ "c7": 32,
35
+ "c8": 33,
36
+ "d1": 34,
37
+ "d2": 35,
38
+ "d3": 36,
39
+ "d4": 37,
40
+ "d5": 38,
41
+ "d6": 39,
42
+ "d7": 40,
43
+ "d8": 41,
44
+ "e1": 42,
45
+ "e2": 43,
46
+ "e3": 44,
47
+ "e4": 45,
48
+ "e5": 46,
49
+ "e6": 47,
50
+ "e7": 48,
51
+ "e8": 49,
52
+ "f1": 50,
53
+ "f2": 51,
54
+ "f3": 52,
55
+ "f4": 53,
56
+ "f5": 54,
57
+ "f6": 55,
58
+ "f7": 56,
59
+ "f8": 57,
60
+ "g1": 58,
61
+ "g2": 59,
62
+ "g3": 60,
63
+ "g4": 61,
64
+ "g5": 62,
65
+ "g6": 63,
66
+ "g7": 64,
67
+ "g8": 65,
68
+ "h1": 66,
69
+ "h2": 67,
70
+ "h3": 68,
71
+ "h4": 69,
72
+ "h5": 70,
73
+ "h6": 71,
74
+ "h7": 72,
75
+ "h8": 73,
76
+ "(x)": 74,
77
+ "(+)": 75,
78
+ "(+*)": 76,
79
+ "(o)": 77,
80
+ "(O)": 78,
81
+ "=Q": 79,
82
+ "=R": 80,
83
+ "=B": 81,
84
+ "=N": 82
85
+ }