ilanou20 committed
Commit fd3416e · verified · 1 Parent(s): 14c4d30

Chess Challenge submission by ilanou20

Files changed (6)
  1. README.md +15 -0
  2. config.json +24 -0
  3. model.safetensors +3 -0
  4. tokenizer.py +468 -0
  5. tokenizer_config.json +8 -0
  6. vocab.json +115 -0
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ library_name: transformers
+ tags:
+ - chess
+ - llm-course
+ - chess-challenge
+ license: mit
+ ---
+
+ # chess-ilan-v7
+
+ Chess model submitted to the LLM Course Chess Challenge.
+
+ - Submitted by: ilanou20
+ - Parameters: 708,992
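
Loading this model requires `trust_remote_code=True`, because `config.json` maps the architecture to custom classes (`model.ChessConfig` / `model.ChessForCausalLM`). A minimal sketch, assuming the repo id is `ilanou20/chess-ilan-v7` and that the `model.py` referenced by `auto_map` is already present in the repository (it is not part of this commit):

```python
# Hedged sketch: the repo id below is inferred from the submitter and model
# name, and model.py (referenced by config.json's auto_map) must exist in the
# repository for the custom classes to be resolved.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "ilanou20/chess-ilan-v7",  # assumed repo id
    trust_remote_code=True,
)
print(sum(p.numel() for p in model.parameters()))  # expected: 708,992
```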
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "ChessForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "model.ChessConfig",
+     "AutoModelForCausalLM": "model.ChessForCausalLM"
+   },
+   "bos_token_id": 1,
+   "dropout": 0.0,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "chess_transformer",
+   "n_ctx": 256,
+   "n_embd": 128,
+   "n_head": 4,
+   "n_inner": 384,
+   "n_layer": 4,
+   "pad_token_id": 0,
+   "tie_weights": true,
+   "transformers_version": "4.57.6",
+   "vocab_size": 113
+ }
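
The README's parameter count (708,992) is consistent with these hyperparameters under the assumption of a GPT-2-style decoder: learned position embeddings over `n_ctx`, pre-LN blocks with biases, a final LayerNorm, and an LM head tied to the token embeddings (`tie_weights: true`). That layout is an assumption here, but the arithmetic reproduces the figure exactly:

```python
# Back-of-envelope parameter count from config.json, assuming a GPT-2-style block.
vocab, d, ctx, n_inner, n_layer = 113, 128, 256, 384, 4

emb = vocab * d + ctx * d              # token + learned position embeddings
per_layer = (
    2 * (2 * d)                        # two LayerNorms (weight + bias each)
    + d * 3 * d + 3 * d                # fused QKV projection
    + d * d + d                        # attention output projection
    + d * n_inner + n_inner            # MLP up-projection
    + n_inner * d + d                  # MLP down-projection
)
total = emb + n_layer * per_layer + 2 * d   # + final LayerNorm; lm_head is tied
print(total)  # 708992
```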
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09addb055f16697ed37bcc0d1c14daecc0b7b24a5026cb3bd6884d09604cc6f2
+ size 2840368
tokenizer.py ADDED
@@ -0,0 +1,468 @@
+ """
+ Custom Chess Tokenizer for the Chess Challenge.
+ We build a vocabulary with:
+ - W/B prefix for White/Black
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
+ - Source square as file and rank, e.g. e2
+ - Destination square as file and rank, e.g. e4
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
+ """
+
+ from __future__ import annotations
+
+ import json
+ import os
+ from pathlib import Path
+ import shutil
+ import inspect
+ from typing import Dict, List, Optional
+
+ from transformers import PreTrainedTokenizer
+ from datasets import load_dataset
+
+
+ class ChessTokenizer(PreTrainedTokenizer):
+
+     model_input_names = ["input_ids", "attention_mask"]
+     vocab_files_names = {"vocab_file": "vocab.json"}
+
+     # Special tokens
+     PAD_TOKEN = "[PAD]"
+     BOS_TOKEN = "[BOS]"
+     EOS_TOKEN = "[EOS]"
+     UNK_TOKEN = "[UNK]"
+     SEP_TOKEN = "[SEP]"
+
+     def __init__(
+         self,
+         vocab_file: Optional[str] = None,
+         vocab: Optional[Dict[str, int]] = None,
+         **kwargs,
+     ):
+
+         self._pad_token = self.PAD_TOKEN
+         self._bos_token = self.BOS_TOKEN
+         self._eos_token = self.EOS_TOKEN
+         self._unk_token = self.UNK_TOKEN
+         self._sep_token = self.SEP_TOKEN
+
+         kwargs.pop("pad_token", None)
+         kwargs.pop("bos_token", None)
+         kwargs.pop("eos_token", None)
+         kwargs.pop("unk_token", None)
+         kwargs.pop("sep_token", None)
+
+         # Built-in default vocabulary (identical to vocab.json). It is only
+         # used as a fallback when neither `vocab` nor a readable `vocab_file`
+         # is provided, instead of silently overriding the caller's arguments.
+         default_vocab = {
+             "[PAD]": 0,
+             "[BOS]": 1,
+             "[EOS]": 2,
+             "[UNK]": 3,
+             "[SEP]": 4,
+             "(+)": 5,
+             "(+*)": 6,
+             "(+*B)": 7,
+             "(+*N)": 8,
+             "(+*Q)": 9,
+             "(+*R)": 10,
+             "(+B)": 11,
+             "(+N)": 12,
+             "(+Q)": 13,
+             "(+R)": 14,
+             "(B)": 15,
+             "(N)": 16,
+             "(O)": 17,
+             "(O+)": 18,
+             "(O+*)": 19,
+             "(Q)": 20,
+             "(R)": 21,
+             "(o)": 22,
+             "(o+)": 23,
+             "(o+*)": 24,
+             "(x)": 25,
+             "(x+)": 26,
+             "(x+*)": 27,
+             "(x+*B)": 28,
+             "(x+*Q)": 29,
+             "(x+*R)": 30,
+             "(x+B)": 31,
+             "(x+N)": 32,
+             "(x+Q)": 33,
+             "(x+R)": 34,
+             "(xB)": 35,
+             "(xE)": 36,
+             "(xE+)": 37,
+             "(xE+*)": 38,
+             "(xN)": 39,
+             "(xQ)": 40,
+             "(xR)": 41,
+             "B": 42,
+             "K": 43,
+             "N": 44,
+             "P": 45,
+             "Q": 46,
+             "R": 47,
+             "W": 48,
+             "a1": 49,
+             "a2": 50,
+             "a3": 51,
+             "a4": 52,
+             "a5": 53,
+             "a6": 54,
+             "a7": 55,
+             "a8": 56,
+             "b1": 57,
+             "b2": 58,
+             "b3": 59,
+             "b4": 60,
+             "b5": 61,
+             "b6": 62,
+             "b7": 63,
+             "b8": 64,
+             "c1": 65,
+             "c2": 66,
+             "c3": 67,
+             "c4": 68,
+             "c5": 69,
+             "c6": 70,
+             "c7": 71,
+             "c8": 72,
+             "d1": 73,
+             "d2": 74,
+             "d3": 75,
+             "d4": 76,
+             "d5": 77,
+             "d6": 78,
+             "d7": 79,
+             "d8": 80,
+             "e1": 81,
+             "e2": 82,
+             "e3": 83,
+             "e4": 84,
+             "e5": 85,
+             "e6": 86,
+             "e7": 87,
+             "e8": 88,
+             "f1": 89,
+             "f2": 90,
+             "f3": 91,
+             "f4": 92,
+             "f5": 93,
+             "f6": 94,
+             "f7": 95,
+             "f8": 96,
+             "g1": 97,
+             "g2": 98,
+             "g3": 99,
+             "g4": 100,
+             "g5": 101,
+             "g6": 102,
+             "g7": 103,
+             "g8": 104,
+             "h1": 105,
+             "h2": 106,
+             "h3": 107,
+             "h4": 108,
+             "h5": 109,
+             "h6": 110,
+             "h7": 111,
+             "h8": 112
+         }
+
+         if vocab is not None:
+             self._vocab = vocab
+         elif vocab_file is not None and os.path.exists(vocab_file):
+             with open(vocab_file, "r", encoding="utf-8") as f:
+                 self._vocab = json.load(f)
+         else:
+             # No vocabulary provided: fall back to the built-in default above.
+             self._vocab = default_vocab
+
+         self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
+
+         super().__init__(
+             pad_token=self._pad_token,
+             bos_token=self._bos_token,
+             eos_token=self._eos_token,
+             unk_token=self._unk_token,
+             sep_token=self._sep_token,
+             **kwargs,
+         )
+
+     def _create_default_vocab(self) -> Dict[str, int]:
+         special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SEP_TOKEN]
+         vocab = {token: idx for idx, token in enumerate(special_tokens)}
+         return vocab
+
+
+     @classmethod
+     def build_vocab_from_dataset(
+         cls,
+         dataset_name: str = "dlouapre/lichess_2025-01_1M",
+         split: str = "train",
+         column: str = "text",
+         min_frequency: Optional[int] = 1,
+         max_samples: Optional[int] = None,
+         save_path: Optional[str] = None,
+     ) -> "ChessTokenizer":
+         """Build a tokenizer vocabulary from a dataset of game strings.
+
+         The vocabulary is cached to `save_path` (defaults to
+         `chess_tokenizer_vocab.json` in the current directory) and reused on
+         subsequent calls. `min_frequency` is accepted for API symmetry but is
+         not applied at the moment.
+         """
+         if save_path is None:
+             cwd = os.getcwd()
+             save_path = os.path.join(cwd, "chess_tokenizer_vocab.json")
+
+         if os.path.exists(save_path):
+             try:
+                 with open(save_path, "r", encoding="utf-8") as f:
+                     print("Loading existing tokenizer vocab from", save_path)
+                     vocab = json.load(f)
+                 return cls(vocab=vocab)
+             except Exception:
+                 pass
+
+         dataset = load_dataset(dataset_name, split=split)
+
+         samples = dataset[column]
+         if max_samples is not None:
+             samples = samples[:max_samples]
+
+         tokens = set()
+
+         for game in samples:
+             if not isinstance(game, str):
+                 continue
+             moves = game.strip().split()
+             for move in moves:
+                 if len(move) < 2:
+                     continue
+                 color = move[0]
+                 piece = move[1]
+                 from_square = move[2:4] if len(move) >= 4 else ''
+                 to_square = move[4:6] if len(move) >= 6 else ''
+                 suffix = move[6:] if len(move) > 6 else ''
+
+                 tokens.add(color)
+                 tokens.add(piece)
+                 if from_square:
+                     tokens.add(from_square)
+                 if to_square:
+                     tokens.add(to_square)
+                 if suffix:
+                     tokens.add(suffix)
+
+         tokens = sorted(tokens)
+
+         special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN, cls.SEP_TOKEN]
+
+         vocab: Dict[str, int] = {}
+         idx = 0
+         for st in special_tokens:
+             vocab[st] = idx
+             idx += 1
+
+         for t in tokens:
+             if t in vocab:
+                 continue
+             vocab[t] = idx
+             idx += 1
+
+         tokenizer = cls(vocab=vocab)
+
+         try:
+             tmp_path = save_path + ".tmp"
+             with open(tmp_path, "w", encoding="utf-8") as f:
+                 json.dump(vocab, f, ensure_ascii=False, indent=2)
+             os.replace(tmp_path, save_path)
+         except Exception:
+             # Non-fatal: ignore save errors but don't leave temp files behind.
+             try:
+                 if 'tmp_path' in locals() and os.path.exists(tmp_path):
+                     os.remove(tmp_path)
+             except Exception:
+                 pass
+
+         return tokenizer
+
+     @property
+     def vocab_size(self) -> int:
+         """Return the size of the vocabulary."""
+         return len(self._vocab)
+
+     def get_vocab(self) -> Dict[str, int]:
+         """Return the vocabulary as a dictionary."""
+         return dict(self._vocab)
+
+     def _tokenize(self, text: str) -> List[str]:
+         """
+         Tokenize a string of moves into a list of tokens.
+
+         Args:
+             text: A string of space-separated moves.
+
+         Returns:
+             List of move tokens.
+         """
+         tokens: List[str] = []
+         for move in text.strip().split():
+             if len(move) < 2:
+                 continue
+             color, piece, from_square, to_square, suffix = self._decompose_move(move)
+             tokens.append(color)
+             tokens.append(piece)
+             tokens.append(from_square)
+             tokens.append(to_square)
+             if suffix:
+                 tokens.append(suffix)
+
+             tokens.append(self._sep_token)
+
+         return tokens[:-1]  # Remove last SEP token
+
+     @staticmethod
+     def _decompose_move(move: str):
+         """Decompose a move string into components: color, piece, from_square, to_square, suffix.
+         Returns a 5-tuple of strings (empty strings for missing parts).
+         """
+         color = move[0]
+         piece = move[1] if len(move) >= 2 else ''
+         from_square = move[2:4] if len(move) >= 4 else ''
+         to_square = move[4:6] if len(move) >= 6 else ''
+         suffix = move[6:] if len(move) > 6 else ''
+         return color, piece, from_square, to_square, suffix
+
+     def _convert_token_to_id(self, token: str) -> int:
+         """Convert a token to its ID."""
+         return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
+
+     def _convert_id_to_token(self, index: int) -> str:
+         """Convert an ID to its token."""
+         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         """Convert a list of tokens back to a string."""
+         # Filter out special tokens for cleaner output
+         special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+         return " ".join(t for t in tokens if t not in special)
+
+     def decode(self, token_ids: List[int], skip_special_tokens: bool = True, **kwargs) -> str:
+         """Decode a list of token IDs back to a string."""
+         tokens = [self._convert_id_to_token(int(tid)) for tid in token_ids]
+         if skip_special_tokens:
+             special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+             # The SEP token is replaced by a space between moves.
+             tokens = [t if t != self.SEP_TOKEN else " " for t in tokens if t not in special]
+         return "".join(tokens)
+
+     def save_vocabulary(
+         self,
+         save_directory: str,
+         filename_prefix: Optional[str] = None,
+     ) -> tuple:
+         """
+         Save the vocabulary to a JSON file.
+
+         Args:
+             save_directory: Directory to save the vocabulary.
+             filename_prefix: Optional prefix for the filename.
+
+         Returns:
+             Tuple containing the path to the saved vocabulary file.
+         """
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
+         )
+
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             json.dump(self._vocab, f, ensure_ascii=False, indent=2)
+
+         return (vocab_file,)
+
+     def save_pretrained(
+         self,
+         save_directory: str,
+         filename_prefix: Optional[str] = None,
+         save_tokenizer_code: bool = True,
+     ) -> None:
+         """Save tokenizer files to a directory in a HF-compatible layout.
+         This writes the vocab JSON (via `save_vocabulary`), a small
+         `tokenizer_config.json` describing special tokens and the vocab
+         filename, and optionally copies the tokenizer module source file
+         into the directory so others can import the implementation.
+         """
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         # Save the vocabulary file
+         vocab_file_tuple = self.save_vocabulary(save_directory, filename_prefix)
+         vocab_file = vocab_file_tuple[0]
+
+         # Write a minimal tokenizer config
+         config = {
+             "tokenizer_class": self.__class__.__name__,
+             "vocab_file": os.path.basename(vocab_file),
+             "pad_token": self.PAD_TOKEN,
+             "bos_token": self.BOS_TOKEN,
+             "eos_token": self.EOS_TOKEN,
+             "unk_token": self.UNK_TOKEN,
+         }
+         config_path = os.path.join(save_directory, "tokenizer_config.json")
+         with open(config_path, "w", encoding="utf-8") as f:
+             json.dump(config, f, ensure_ascii=False, indent=2)
+
+         # Optionally copy this module file so the tokenizer class implementation
+         # is available alongside the saved vocab/config. This helps when
+         # transferring the saved tokenizer to another environment.
+         if save_tokenizer_code:
+             try:
+                 src_file = Path(inspect.getsourcefile(self.__class__))
+                 dst_file = Path(save_directory) / src_file.name
+                 shutil.copy2(src_file, dst_file)
+             except Exception:
+                 # Non-fatal; we still saved vocab and config
+                 pass
+
+
+ def count_vocab_from_dataset(
+     dataset_name: str = "dlouapre/lichess_2025-01_1M",
+     split: str = "train",
+     column: str = "text",
+     max_samples: Optional[int] = 10000,
+ ) -> Dict[str, int]:
+     """
+     Count token frequencies in a dataset (useful for vocabulary analysis).
+
+     Args:
+         dataset_name: Name of the dataset on Hugging Face Hub.
+         split: Dataset split to use.
+         column: Column containing the game strings.
+         max_samples: Maximum number of samples to process.
+
+     Returns:
+         Dictionary mapping tokens to their frequencies.
+     """
+     from collections import Counter
+     from datasets import load_dataset
+
+     dataset = load_dataset(dataset_name, split=split)
+
+     if max_samples is not None:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+
+     tokenizer = ChessTokenizer()
+     token_counts = Counter()
+
+     for example in dataset:
+         token_counts.update(tokenizer._tokenize(example[column]))
+
+     return dict(token_counts)
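
A quick round trip with the tokenizer above. This is a sketch that assumes `tokenizer.py` and `vocab.json` from this commit are in the working directory; the move strings are illustrative examples of the W/B + piece + from-square + to-square (+ optional suffix) format described in the module docstring:

```python
# Sketch: encode and decode a few moves with the ChessTokenizer defined above.
from tokenizer import ChessTokenizer

tok = ChessTokenizer(vocab_file="vocab.json")
game = "WPe2e4 BPe7e5 WNg1f3 BNb8c6"

ids = tok(game)["input_ids"]
print(ids[:5])          # [48, 45, 82, 84, 4] -> W, P, e2, e4, [SEP]
print(tok.decode(ids))  # "WPe2e4 BPe7e5 WNg1f3 BNb8c6"
```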
tokenizer_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "tokenizer_class": "ChessTokenizer",
+   "vocab_file": "vocab.json",
+   "pad_token": "[PAD]",
+   "bos_token": "[BOS]",
+   "eos_token": "[EOS]",
+   "unk_token": "[UNK]"
+ }
vocab.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "[PAD]": 0,
+   "[BOS]": 1,
+   "[EOS]": 2,
+   "[UNK]": 3,
+   "[SEP]": 4,
+   "(+)": 5,
+   "(+*)": 6,
+   "(+*B)": 7,
+   "(+*N)": 8,
+   "(+*Q)": 9,
+   "(+*R)": 10,
+   "(+B)": 11,
+   "(+N)": 12,
+   "(+Q)": 13,
+   "(+R)": 14,
+   "(B)": 15,
+   "(N)": 16,
+   "(O)": 17,
+   "(O+)": 18,
+   "(O+*)": 19,
+   "(Q)": 20,
+   "(R)": 21,
+   "(o)": 22,
+   "(o+)": 23,
+   "(o+*)": 24,
+   "(x)": 25,
+   "(x+)": 26,
+   "(x+*)": 27,
+   "(x+*B)": 28,
+   "(x+*Q)": 29,
+   "(x+*R)": 30,
+   "(x+B)": 31,
+   "(x+N)": 32,
+   "(x+Q)": 33,
+   "(x+R)": 34,
+   "(xB)": 35,
+   "(xE)": 36,
+   "(xE+)": 37,
+   "(xE+*)": 38,
+   "(xN)": 39,
+   "(xQ)": 40,
+   "(xR)": 41,
+   "B": 42,
+   "K": 43,
+   "N": 44,
+   "P": 45,
+   "Q": 46,
+   "R": 47,
+   "W": 48,
+   "a1": 49,
+   "a2": 50,
+   "a3": 51,
+   "a4": 52,
+   "a5": 53,
+   "a6": 54,
+   "a7": 55,
+   "a8": 56,
+   "b1": 57,
+   "b2": 58,
+   "b3": 59,
+   "b4": 60,
+   "b5": 61,
+   "b6": 62,
+   "b7": 63,
+   "b8": 64,
+   "c1": 65,
+   "c2": 66,
+   "c3": 67,
+   "c4": 68,
+   "c5": 69,
+   "c6": 70,
+   "c7": 71,
+   "c8": 72,
+   "d1": 73,
+   "d2": 74,
+   "d3": 75,
+   "d4": 76,
+   "d5": 77,
+   "d6": 78,
+   "d7": 79,
+   "d8": 80,
+   "e1": 81,
+   "e2": 82,
+   "e3": 83,
+   "e4": 84,
+   "e5": 85,
+   "e6": 86,
+   "e7": 87,
+   "e8": 88,
+   "f1": 89,
+   "f2": 90,
+   "f3": 91,
+   "f4": 92,
+   "f5": 93,
+   "f6": 94,
+   "f7": 95,
+   "f8": 96,
+   "g1": 97,
+   "g2": 98,
+   "g3": 99,
+   "g4": 100,
+   "g5": 101,
+   "g6": 102,
+   "g7": 103,
+   "g8": 104,
+   "h1": 105,
+   "h2": 106,
+   "h3": 107,
+   "h4": 108,
+   "h5": 109,
+   "h6": 110,
+   "h7": 111,
+   "h8": 112
+ }
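
As a quick sanity check, the vocabulary above matches the `vocab_size` of 113 declared in `config.json`: 5 special tokens, 37 suffix/castling tokens, 7 piece and colour letters, and all 64 board squares. A small sketch, assuming `vocab.json` is in the working directory:

```python
# Verify the vocabulary size and the square coverage of vocab.json.
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

squares = [t for t in vocab if len(t) == 2 and t[0] in "abcdefgh" and t[1] in "12345678"]
print(len(vocab))    # 113, matching vocab_size in config.json
print(len(squares))  # 64 squares, a1 through h8
```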