Br0wks committed · Commit b83b66b · verified · 1 Parent(s): fb9bb30

Chess Challenge submission by Br0wks

Files changed (9)
  1. README.md +11 -0
  2. config.json +20 -0
  3. model.py +437 -0
  4. model.safetensors +3 -0
  5. special_tokens_map.json +6 -0
  6. tokenizer.py +130 -0
  7. tokenizer_config.json +59 -0
  8. utils.py +305 -0
  9. vocab.json +1202 -0
README.md ADDED
@@ -0,0 +1,11 @@
+ ---
+ library_name: transformers
+ tags:
+ - chess
+ - llm-course
+ - chess-challenge
+ license: mit
+ ---
+ # chess_test
+ Submitted by: Br0wks
+ Parameters: 924,000
config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "architectures": [
+     "ChessForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "chess_transformer",
+   "n_ctx": 256,
+   "n_embd": 112,
+   "n_head": 8,
+   "n_inner": 336,
+   "n_layer": 6,
+   "pad_token_id": 0,
+   "tie_weights": true,
+   "transformers_version": "4.57.3",
+   "vocab_size": 1200
+ }
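As a quick sanity check (a minimal sketch, independent of the repo code), the hyperparameters above reproduce the 924,000-parameter figure from the README once Linear biases and weight tying are accounted for:

```python
# Back-of-the-envelope parameter count for the config above.
# Weight tying makes the LM head free; all Linear biases are included.
V, d, L, n_ctx, n_inner = 1200, 112, 6, 256, 336

per_layer = (
    3 * d * d + 3 * d                # fused QKV projection (weight + bias)
    + d * d + d                      # attention output projection
    + 2 * d * n_inner + n_inner + d  # MLP: c_fc and c_proj, with biases
    + 4 * d                          # two LayerNorms (weight + bias each)
)
total = V * d + n_ctx * d + L * per_layer + 2 * d  # embeddings + layers + final LN
print(per_layer, total)  # 126784 924000
```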
model.py ADDED
@@ -0,0 +1,437 @@
+ """
+ Chess Transformer Model for the Chess Challenge.
+
+ This module provides a simple GPT-style transformer architecture
+ designed to fit within the 1M parameter constraint.
+
+ Key components:
+ - ChessConfig: Configuration class for model hyperparameters
+ - ChessForCausalLM: The main model class for next-move prediction
+ """
+
+ from __future__ import annotations
+
+ import math
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PretrainedConfig, PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class ChessConfig(PretrainedConfig):
+     """
+     Configuration class for the Chess Transformer model.
+
+     This configuration is designed for a ~1M parameter model.
+     Students can adjust these values to explore different architectures.
+
+     Parameter budget breakdown (with the shipped config.json values,
+     n_embd=112, n_head=8, n_inner=336):
+     - Token Embeddings:    1200 x 112 = 134,400
+     - Position Embeddings:  256 x 112 =  28,672
+     - Transformer Layers: 6 x 126,784 = 760,704
+     - LM Head (with weight tying): 0 (shared with embeddings)
+     - Total: 924,000 parameters
+     (The class defaults below, n_embd=128 with n_inner=384, would come to
+     ~1.18M parameters and exceed the budget; config.json narrows the model
+     width to stay under it.)
+
+     Attributes:
+         vocab_size: Size of the vocabulary (number of unique moves).
+         n_embd: Embedding dimension (d_model).
+         n_layer: Number of transformer layers.
+         n_head: Number of attention heads.
+         n_ctx: Maximum sequence length (context window).
+         n_inner: Feed-forward inner dimension (default: 3 * n_embd).
+         dropout: Dropout probability.
+         layer_norm_epsilon: Epsilon for layer normalization.
+         tie_weights: Whether to tie embedding and output weights.
+     """
+
+     model_type = "chess_transformer"
+
+     def __init__(
+         self,
+         vocab_size: int = 1200,
+         n_embd: int = 128,
+         n_layer: int = 6,
+         n_head: int = 4,
+         n_ctx: int = 256,
+         n_inner: Optional[int] = None,
+         dropout: float = 0.1,
+         layer_norm_epsilon: float = 1e-5,
+         tie_weights: bool = True,
+         pad_token_id: int = 0,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
+
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_ctx = n_ctx
+         self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
+         self.dropout = dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.tie_weights = tie_weights
+         # Inform the HF base class about tying behavior
+         self.tie_word_embeddings = bool(tie_weights)
+
+
+ class MultiHeadAttention(nn.Module):
+     """
+     Multi-head self-attention module.
+
+     This is a standard scaled dot-product attention implementation
+     with causal masking for autoregressive generation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         assert config.n_embd % config.n_head == 0, \
+             f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
+
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.head_dim = config.n_embd // config.n_head
+
+         # Combined QKV projection for efficiency
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+
+         self.dropout = nn.Dropout(config.dropout)
+
+         # Causal mask, precomputed once for the full context window
+         self.register_buffer(
+             "bias",
+             torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
+                 1, 1, config.n_ctx, config.n_ctx
+             ),
+             persistent=False,
+         )
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         batch_size, seq_len, _ = x.size()
+
+         # Compute Q, K, V
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+
+         # Reshape for multi-head attention
+         q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+
+         # Scaled dot-product attention
+         attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+
+         # Apply causal mask
+         causal_mask = self.bias[:, :, :seq_len, :seq_len]
+         attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))
+
+         # Apply attention mask (for padding)
+         if attention_mask is not None:
+             # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
+             attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+             attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))
+
+         attn_weights = F.softmax(attn_weights, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         # Apply attention to values
+         attn_output = torch.matmul(attn_weights, v)
+
+         # Reshape back
+         attn_output = attn_output.transpose(1, 2).contiguous().view(
+             batch_size, seq_len, self.n_embd
+         )
+
+         # Output projection
+         attn_output = self.c_proj(attn_output)
+
+         return attn_output
+
+
+ class FeedForward(nn.Module):
+     """
+     Feed-forward network (MLP) module.
+
+     Standard two-layer MLP with GELU activation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.c_fc = nn.Linear(config.n_embd, config.n_inner)
+         self.c_proj = nn.Linear(config.n_inner, config.n_embd)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.c_fc(x)
+         x = F.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     """
+     A single transformer block with attention and feed-forward layers.
+
+     Uses pre-normalization (LayerNorm before attention/FFN) for better
+     training stability.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.attn = MultiHeadAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.mlp = FeedForward(config)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         # Pre-norm attention
+         x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
+         # Pre-norm FFN
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class ChessForCausalLM(PreTrainedModel):
+     """
+     Chess Transformer for Causal Language Modeling (next-move prediction).
+
+     This model is designed to predict the next chess move given a sequence
+     of previous moves. It uses a GPT-style architecture with:
+     - Token embeddings for chess moves
+     - Learned positional embeddings
+     - Stacked transformer blocks
+     - Linear head for next-token prediction
+
+     The model supports weight tying between the embedding layer and the
+     output projection to save parameters.
+
+     Example:
+         >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
+         >>> model = ChessForCausalLM(config)
+         >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
+         >>> outputs = model(**inputs)
+         >>> next_move_logits = outputs.logits[:, -1, :]
+     """
+
+     config_class = ChessConfig
+     base_model_prefix = "transformer"
+     supports_gradient_checkpointing = True
+     # Suppress missing-key warning for tied lm_head when loading
+     _keys_to_ignore_on_load_missing = ["lm_head.weight"]
+
+     def __init__(self, config: ChessConfig):
+         super().__init__(config)
+
+         # Token and position embeddings
+         self.wte = nn.Embedding(config.vocab_size, config.n_embd)
+         self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
+
+         self.drop = nn.Dropout(config.dropout)
+
+         # Transformer blocks
+         self.h = nn.ModuleList([
+             TransformerBlock(config) for _ in range(config.n_layer)
+         ])
+
+         # Final layer norm
+         self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+
+         # Output head
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # Declare tied weights for proper serialization
+         if config.tie_weights:
+             self._tied_weights_keys = ["lm_head.weight"]
+
+         # Initialize weights
+         self.post_init()
+
+         # Tie weights if configured
+         if config.tie_weights:
+             self.tie_weights()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings: nn.Module):
+         self.wte = new_embeddings
+         if getattr(self.config, "tie_weights", False):
+             self.tie_weights()
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings: nn.Module):
+         self.lm_head = new_embeddings
+
+     def tie_weights(self):
+         # Use the HF helper to tie or clone depending on config
+         if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
+             self._tie_or_clone_weights(self.lm_head, self.wte)
+
+     def _init_weights(self, module: nn.Module):
+         """Initialize weights following GPT-2 style."""
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+         elif isinstance(module, nn.LayerNorm):
+             torch.nn.init.ones_(module.weight)
+             torch.nn.init.zeros_(module.bias)
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         """
+         Forward pass of the model.
+
+         Args:
+             input_ids: Token IDs of shape (batch_size, seq_len).
+             attention_mask: Attention mask of shape (batch_size, seq_len).
+             position_ids: Position IDs of shape (batch_size, seq_len).
+             labels: Labels for language modeling loss.
+             return_dict: Whether to return a ModelOutput object.
+
+         Returns:
+             CausalLMOutputWithPast containing loss (if labels provided) and logits.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         batch_size, seq_len = input_ids.size()
+         device = input_ids.device
+
+         # Create position IDs if not provided
+         if position_ids is None:
+             position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)
+
+         # Get embeddings
+         token_embeds = self.wte(input_ids)
+         position_embeds = self.wpe(position_ids)
+         hidden_states = self.drop(token_embeds + position_embeds)
+
+         # Pass through transformer blocks
+         for block in self.h:
+             hidden_states = block(hidden_states, attention_mask=attention_mask)
+
+         # Final layer norm
+         hidden_states = self.ln_f(hidden_states)
+
+         # Get logits
+         logits = self.lm_head(hidden_states)
+
+         # Compute loss if labels are provided
+         loss = None
+         if labels is not None:
+             # Shift logits and labels for next-token prediction
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+
+             # Flatten for cross-entropy
+             loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
+             loss = loss_fct(
+                 shift_logits.view(-1, shift_logits.size(-1)),
+                 shift_labels.view(-1),
+             )
+
+         if not return_dict:
+             output = (logits,)
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+             hidden_states=None,
+             attentions=None,
+         )
+
+     @torch.no_grad()
+     def generate_move(
+         self,
+         input_ids: torch.LongTensor,
+         temperature: float = 1.0,
+         top_k: Optional[int] = None,
+         top_p: Optional[float] = None,
+     ) -> int:
+         """
+         Generate the next move given a sequence of moves.
+
+         Args:
+             input_ids: Token IDs of shape (1, seq_len).
+             temperature: Sampling temperature (1.0 = no change).
+             top_k: If set, only sample from the top k tokens.
+             top_p: If set, use nucleus sampling with this threshold.
+
+         Returns:
+             The token ID of the predicted next move.
+         """
+         self.eval()
+
+         # Get logits for the last position
+         outputs = self(input_ids)
+         logits = outputs.logits[:, -1, :] / temperature
+
+         # Apply top-k filtering
+         if top_k is not None:
+             indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+             logits[indices_to_remove] = float("-inf")
+
+         # Apply top-p (nucleus) filtering
+         if top_p is not None:
+             sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+             cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+             # Remove tokens with cumulative probability above the threshold
+             sorted_indices_to_remove = cumulative_probs > top_p
+             sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+             sorted_indices_to_remove[..., 0] = 0
+
+             indices_to_remove = sorted_indices_to_remove.scatter(
+                 dim=-1, index=sorted_indices, src=sorted_indices_to_remove
+             )
+             logits[indices_to_remove] = float("-inf")
+
+         # Sample from the distribution
+         probs = F.softmax(logits, dim=-1)
+         next_token = torch.multinomial(probs, num_samples=1)
+
+         return next_token.item()
+
+
+ # Register the model with Auto classes for easy loading
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("chess_transformer", ChessConfig)
+ AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
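A minimal end-to-end sketch of how the pieces above fit together (it assumes model.py, tokenizer.py, and vocab.json from this commit are in the working directory; the freshly constructed weights are untrained, so the sampled move is essentially random until model.safetensors is loaded):

```python
import torch
from model import ChessConfig, ChessForCausalLM
from tokenizer import ChessTokenizer

tok = ChessTokenizer(vocab_file="vocab.json")
cfg = ChessConfig(vocab_size=tok.vocab_size, n_embd=112, n_head=8, n_inner=336)
model = ChessForCausalLM(cfg)  # in practice, load the shipped checkpoint instead

# [BOS] followed by three opening moves in the extended UCI notation
moves = tok._tokenize("WPe2e4 BPc7c5 WNg1f3")
ids = torch.tensor([[tok.bos_token_id] + tok.convert_tokens_to_ids(moves)])

next_id = model.generate_move(ids, temperature=0.8, top_k=10)
print(tok.convert_ids_to_tokens(next_id))
```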
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9de7e6548178446130b8f76b2cbd82b98c64822ee046afefb208b0e897e8da3
+ size 3702448
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "[BOS]",
+   "eos_token": "[EOS]",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
tokenizer.py ADDED
@@ -0,0 +1,130 @@
+ """
+ Custom Chess Tokenizer for the Chess Challenge (normalized version).
+
+ This tokenizer treats each move as a single token using the extended UCI notation
+ from the Lichess dataset (e.g., WPe2e4, BNg8f6).
+
+ The dataset format uses:
+ - W/B prefix for White/Black
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
+ - Source and destination squares (e.g., e2e4)
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
+
+ Moves are normalized on the fly: the decoration suffixes are stripped so that
+ variants of the same move share a single vocabulary entry.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import os
+ import re
+
+ from transformers import PreTrainedTokenizer
+
+ # Regexes to extract the source square, destination square, and promotion piece
+ MOVE_RE = re.compile(r"([a-h][1-8])([a-h][1-8])")
+ PROMO_RE = re.compile(r"=([NBRQ])")
+
+
+ def normalize_move(tok: str) -> str:
+     """Turn 'WPe2e4(x)' into 'WPe2e4' to shrink the vocabulary."""
+     # 1. Keep the core move information
+     m = MOVE_RE.search(tok)
+     if not m:
+         return tok  # Fallback (will most likely map to UNK)
+
+     fr, to = m.group(1), m.group(2)
+
+     # 2. Handle promotion
+     promo = ""
+     pm = PROMO_RE.search(tok)
+     if pm:
+         promo = "=" + pm.group(1)
+
+     # 3. Rebuild the standardized token.
+     # Keep the WP/BN prefix (chars 0 and 1) for the color/piece information,
+     # but drop the (x), (+), etc. suffixes.
+     prefix = tok[:2] if len(tok) >= 2 else "WP"
+     return f"{prefix}{fr}{to}{promo}"
+
+
+ class ChessTokenizer(PreTrainedTokenizer):
+     model_input_names = ["input_ids", "attention_mask"]
+
+     PAD_TOKEN = "[PAD]"
+     BOS_TOKEN = "[BOS]"
+     EOS_TOKEN = "[EOS]"
+     UNK_TOKEN = "[UNK]"
+
+     def __init__(self, vocab_file=None, vocab=None, **kwargs):
+         self._pad_token = self.PAD_TOKEN
+         self._bos_token = self.BOS_TOKEN
+         self._eos_token = self.EOS_TOKEN
+         self._unk_token = self.UNK_TOKEN
+
+         # Drop duplicate special-token kwargs; we pass our own below
+         for t in ["pad_token", "bos_token", "eos_token", "unk_token"]:
+             kwargs.pop(t, None)
+
+         if vocab:
+             self._vocab = vocab
+         elif vocab_file:
+             with open(vocab_file, "r", encoding="utf-8") as f:
+                 self._vocab = json.load(f)
+         else:
+             self._vocab = {t: i for i, t in enumerate([self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])}
+
+         self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
+         super().__init__(pad_token=self.PAD_TOKEN, bos_token=self.BOS_TOKEN, eos_token=self.EOS_TOKEN, unk_token=self.UNK_TOKEN, **kwargs)
+
+     @property
+     def vocab_size(self):
+         return len(self._vocab)
+
+     def get_vocab(self):
+         return dict(self._vocab)
+
+     def _tokenize(self, text):
+         # Normalization happens here, on the fly
+         return [normalize_move(t) for t in text.strip().split()]
+
+     def _convert_token_to_id(self, token):
+         return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))
+
+     def _convert_id_to_token(self, index):
+         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
+
+     def convert_tokens_to_string(self, tokens):
+         return " ".join(t for t in tokens if t not in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN])
+
+     def save_vocabulary(self, save_directory, filename_prefix=None):
+         if not os.path.exists(save_directory):
+             os.makedirs(save_directory)
+         path = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
+         with open(path, "w") as f:
+             json.dump(self._vocab, f, indent=2)
+         return (path,)
+
+     @classmethod
+     def build_vocab_from_dataset(cls, dataset_name, min_frequency=2, max_vocab_size=1200, **kwargs):
+         """Build a compact, dense vocabulary from the dataset."""
+         from collections import Counter
+
+         from datasets import load_dataset
+
+         # Stream the dataset so this stays fast
+         ds = load_dataset(dataset_name, split="train", streaming=True)
+         ds = ds.take(50000)  # 50k games are enough to see all the possible moves
+
+         counter = Counter()
+         for ex in ds:
+             # Normalize before counting!
+             moves = [normalize_move(t) for t in ex["text"].split()]
+             counter.update(moves)
+
+         # Keep the special tokens plus the N most frequent moves;
+         # moves rarer than min_frequency are left to map to UNK
+         special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
+         most_common = [
+             t for t, c in counter.most_common(max_vocab_size - len(special))
+             if c >= min_frequency
+         ]
+
+         vocab = {t: i for i, t in enumerate(special + most_common)}
+         return cls(vocab=vocab)
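For illustration (a minimal sketch assuming tokenizer.py is importable), the normalizer collapses decorated variants of a move onto one vocabulary entry:

```python
from tokenizer import normalize_move

# Capture/check suffixes are stripped; the color/piece prefix and squares survive.
print(normalize_move("WPe2e4"))        # WPe2e4
print(normalize_move("BNg8f6(x)"))     # BNg8f6
print(normalize_move("WQd1h5(+)"))     # WQd1h5
print(normalize_move("WPe7e8=Q(+*)"))  # WPe7e8=Q  (promotion is preserved)
```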
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[BOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[EOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[BOS]",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[EOS]",
+   "extra_special_tokens": {},
+   "model_max_length": 256,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "ChessTokenizer",
+   "unk_token": "[UNK]",
+   "vocab_file": "vocab.json",
+   "vocab_files_names": {
+     "vocab_file": "vocab.json"
+   },
+   "vocab_size": 1200,
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.ChessTokenizer",
+       null
+     ],
+     "AutoModelForCausalLM": [
+       "model.ChessForCausalLM",
+       null
+     ]
+   }
+ }
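Given the auto_map above, the submission should be loadable straight from the Hub with trust_remote_code. A sketch, with the repo id "Br0wks/chess_test" inferred from the README (adjust as needed):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Br0wks/chess_test"  # assumed repo id; custom code requires trust_remote_code
tok = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

ids = tok("WPe2e4 BPe7e5 WNg1f3", return_tensors="pt").input_ids
print(model(input_ids=ids).logits.shape)  # torch.Size([1, 3, 1200])
```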
utils.py ADDED
@@ -0,0 +1,305 @@
+ """
+ Utility functions for the Chess Challenge.
+
+ This module provides helper functions for:
+ - Parameter counting and budget analysis
+ - Model registration with Hugging Face
+ - Move validation with python-chess
+ """
+
+ from __future__ import annotations
+
+ from typing import Dict, Optional, TYPE_CHECKING
+
+ import torch.nn as nn
+
+ if TYPE_CHECKING:
+     from model import ChessConfig
+
+
+ def count_parameters(model: nn.Module, trainable_only: bool = True) -> int:
+     """
+     Count the number of parameters in a model.
+
+     Args:
+         model: The PyTorch model.
+         trainable_only: If True, only count trainable parameters.
+
+     Returns:
+         Total number of parameters.
+     """
+     if trainable_only:
+         return sum(p.numel() for p in model.parameters() if p.requires_grad)
+     return sum(p.numel() for p in model.parameters())
+
+
+ def count_parameters_by_component(model: nn.Module) -> Dict[str, int]:
+     """
+     Count parameters broken down by model component.
+
+     Args:
+         model: The PyTorch model.
+
+     Returns:
+         Dictionary mapping component names to parameter counts.
+     """
+     counts = {}
+     for name, module in model.named_modules():
+         if len(list(module.children())) == 0:  # Leaf module
+             param_count = sum(p.numel() for p in module.parameters(recurse=False))
+             if param_count > 0:
+                 counts[name] = param_count
+     return counts
+
+
+ def estimate_parameters(config: "ChessConfig") -> Dict[str, int]:
+     """
+     Estimate the parameter count for a given configuration.
+
+     This is useful for planning your architecture before building the model.
+     Linear biases are included so the estimate matches the built model exactly.
+
+     Args:
+         config: Model configuration.
+
+     Returns:
+         Dictionary with estimated parameter counts by component.
+     """
+     V = config.vocab_size
+     d = config.n_embd
+     L = config.n_layer
+     n_ctx = config.n_ctx
+     n_inner = config.n_inner
+
+     estimates = {
+         "token_embeddings": V * d,
+         "position_embeddings": n_ctx * d,
+         "attention_qkv_per_layer": 3 * d * d + 3 * d,  # fused QKV weight + bias
+         "attention_proj_per_layer": d * d + d,  # output projection weight + bias
+         "ffn_per_layer": 2 * d * n_inner + n_inner + d,  # c_fc and c_proj, with biases
+         "layernorm_per_layer": 4 * d,  # 2 LayerNorms, each with weight and bias
+         "final_layernorm": 2 * d,
+     }
+
+     # Calculate totals
+     per_layer = (
+         estimates["attention_qkv_per_layer"] +
+         estimates["attention_proj_per_layer"] +
+         estimates["ffn_per_layer"] +
+         estimates["layernorm_per_layer"]
+     )
+
+     estimates["total_transformer_layers"] = L * per_layer
+
+     # LM head (tied with embeddings by default)
+     if config.tie_weights:
+         estimates["lm_head"] = 0  # tied with token embeddings
+     else:
+         estimates["lm_head"] = V * d
+
+     # Grand total
+     estimates["total"] = (
+         estimates["token_embeddings"] +
+         estimates["position_embeddings"] +
+         estimates["total_transformer_layers"] +
+         estimates["final_layernorm"] +
+         estimates["lm_head"]
+     )
+
+     return estimates
+
+
+ def print_parameter_budget(config: "ChessConfig", limit: int = 1_000_000) -> None:
+     """
+     Print a formatted parameter budget analysis.
+
+     Args:
+         config: Model configuration.
+         limit: Parameter limit to compare against.
+     """
+     estimates = estimate_parameters(config)
+
+     print("=" * 60)
+     print("PARAMETER BUDGET ANALYSIS")
+     print("=" * 60)
+     print("\nConfiguration:")
+     print(f"  vocab_size (V) = {config.vocab_size}")
+     print(f"  n_embd (d)     = {config.n_embd}")
+     print(f"  n_layer (L)    = {config.n_layer}")
+     print(f"  n_head         = {config.n_head}")
+     print(f"  n_ctx          = {config.n_ctx}")
+     print(f"  n_inner        = {config.n_inner}")
+     print(f"  tie_weights    = {config.tie_weights}")
+
+     print("\nParameter Breakdown:")
+     print(f"  Token Embeddings:    {estimates['token_embeddings']:>10,}")
+     print(f"  Position Embeddings: {estimates['position_embeddings']:>10,}")
+     print(f"  Transformer Layers:  {estimates['total_transformer_layers']:>10,}")
+     print(f"  Final LayerNorm:     {estimates['final_layernorm']:>10,}")
+
+     if config.tie_weights:
+         print(f"  LM Head:             {'(tied)':>10}")
+     else:
+         print(f"  LM Head:             {estimates['lm_head']:>10,}")
+
+     print("  " + "-" * 30)
+     print(f"  TOTAL:               {estimates['total']:>10,}")
+
+     print("\nBudget Status:")
+     print(f"  Limit:     {limit:>10,}")
+     print(f"  Used:      {estimates['total']:>10,}")
+     print(f"  Remaining: {limit - estimates['total']:>10,}")
+
+     if estimates['total'] <= limit:
+         print(f"\n  Within budget! ({estimates['total'] / limit * 100:.1f}% used)")
+     else:
+         print(f"\n  OVER BUDGET by {estimates['total'] - limit:,} parameters!")
+
+     print("=" * 60)
+
+
+ def validate_move_with_chess(move: str, board_fen: Optional[str] = None) -> bool:
+     """
+     Validate a move using python-chess.
+
+     This function converts the dataset's extended UCI format to standard UCI
+     and validates it against the current board state.
+
+     Args:
+         move: Move in extended UCI format (e.g., "WPe2e4", "BNg8f6(x)").
+         board_fen: FEN string of the current board state (optional).
+
+     Returns:
+         True if the move is legal, False otherwise.
+     """
+     try:
+         import chess
+     except ImportError:
+         raise ImportError("python-chess is required for move validation. "
+                           "Install it with: pip install python-chess")
+
+     # Parse the extended UCI format
+     # Format: [W|B][Piece][from_sq][to_sq][suffix]
+     # Example: WPe2e4, BNg8f6(x), WKe1g1(o)
+
+     if len(move) < 6:
+         return False
+
+     # Extract components (color/piece are informational; legality is
+     # checked from the squares alone)
+     color = move[0]  # W or B
+     piece = move[1]  # P, N, B, R, Q, K
+     from_sq = move[2:4]  # e.g., "e2"
+     to_sq = move[4:6]  # e.g., "e4"
+
+     # Check for promotion
+     promotion = None
+     if "=" in move:
+         promo_idx = move.index("=")
+         promotion = move[promo_idx + 1].lower()
+
+     # Create board
+     board = chess.Board(board_fen) if board_fen else chess.Board()
+
+     # Build UCI move string
+     uci_move = from_sq + to_sq
+     if promotion:
+         uci_move += promotion
+
+     try:
+         move_obj = chess.Move.from_uci(uci_move)
+         return move_obj in board.legal_moves
+     except (ValueError, chess.InvalidMoveError):
+         return False
+
+
+ def convert_extended_uci_to_uci(move: str) -> str:
+     """
+     Convert extended UCI format to standard UCI format.
+
+     Args:
+         move: Move in extended UCI format (e.g., "WPe2e4").
+
+     Returns:
+         Move in standard UCI format (e.g., "e2e4").
+     """
+     if len(move) < 6:
+         return move
+
+     # Extract squares
+     from_sq = move[2:4]
+     to_sq = move[4:6]
+
+     # Check for promotion
+     promotion = ""
+     if "=" in move:
+         promo_idx = move.index("=")
+         promotion = move[promo_idx + 1].lower()
+
+     return from_sq + to_sq + promotion
+
+
+ def convert_uci_to_extended(
+     uci_move: str,
+     board_fen: str,
+ ) -> str:
+     """
+     Convert standard UCI format to extended UCI format.
+
+     Args:
+         uci_move: Move in standard UCI format (e.g., "e2e4").
+         board_fen: FEN string of the current board state.
+
+     Returns:
+         Move in extended UCI format (e.g., "WPe2e4").
+     """
+     try:
+         import chess
+     except ImportError:
+         raise ImportError("python-chess is required for move conversion.")
+
+     board = chess.Board(board_fen)
+     move = chess.Move.from_uci(uci_move)
+
+     # Get color
+     color = "W" if board.turn == chess.WHITE else "B"
+
+     # Get piece
+     piece = board.piece_at(move.from_square)
+     piece_letter = piece.symbol().upper() if piece else "P"
+
+     # Build extended UCI
+     from_sq = chess.square_name(move.from_square)
+     to_sq = chess.square_name(move.to_square)
+
+     result = f"{color}{piece_letter}{from_sq}{to_sq}"
+
+     # Add promotion
+     if move.promotion:
+         result += f"={chess.piece_symbol(move.promotion).upper()}"
+
+     # Add suffix for captures
+     if board.is_capture(move):
+         result += "(x)"
+
+     # Add suffix for check/checkmate
+     board.push(move)
+     if board.is_checkmate():
+         if "(x)" in result:
+             result = result.replace("(x)", "(x+*)")
+         else:
+             result += "(+*)"
+     elif board.is_check():
+         if "(x)" in result:
+             result = result.replace("(x)", "(x+)")
+         else:
+             result += "(+)"
+     board.pop()
+
+     # Handle castling notation
+     if board.is_castling(move):
+         if move.to_square in [chess.G1, chess.G8]:  # Kingside
+             result = result.replace("(x)", "").replace("(+)", "") + "(o)"
+         else:  # Queenside
+             result = result.replace("(x)", "").replace("(+)", "") + "(O)"
+
+     return result
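A short round trip through these helpers (a sketch; it assumes utils.py is importable and python-chess is installed):

```python
import chess  # pip install python-chess
from utils import (convert_extended_uci_to_uci, convert_uci_to_extended,
                   validate_move_with_chess)

start = chess.Board().fen()  # standard starting position

ext = convert_uci_to_extended("e2e4", start)
print(ext)                                        # WPe2e4
print(convert_extended_uci_to_uci(ext))           # e2e4
print(validate_move_with_chess(ext, start))       # True
print(validate_move_with_chess("WPe2e5", start))  # False (illegal pawn move)
```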
vocab.json ADDED
@@ -0,0 +1,1202 @@
1
+ {
2
+ "[PAD]": 0,
3
+ "[BOS]": 1,
4
+ "[EOS]": 2,
5
+ "[UNK]": 3,
6
+ "WNg1f3": 4,
7
+ "BNg8f6": 5,
8
+ "WPe2e4": 6,
9
+ "WPd2d4": 7,
10
+ "WNb1c3": 8,
11
+ "WKe1g1": 9,
12
+ "BNb8c6": 10,
13
+ "BKe8g8": 11,
14
+ "BPd7d5": 12,
15
+ "BPe7e6": 13,
16
+ "BPe7e5": 14,
17
+ "BPd7d6": 15,
18
+ "WPh2h3": 16,
19
+ "WPc2c3": 17,
20
+ "BPg7g6": 18,
21
+ "BPc7c6": 19,
22
+ "BPh7h6": 20,
23
+ "BPc7c5": 21,
24
+ "BPa7a6": 22,
25
+ "WPc2c4": 23,
26
+ "WNf3e5": 24,
27
+ "BBf8e7": 25,
28
+ "WPa2a3": 26,
29
+ "WPg2g3": 27,
30
+ "WPe2e3": 28,
31
+ "WPf2f4": 29,
32
+ "BNb8d7": 30,
33
+ "WBf1c4": 31,
34
+ "WPd2d3": 32,
35
+ "WRf1e1": 33,
36
+ "WNb1d2": 34,
37
+ "BPb7b6": 35,
38
+ "WPe4e5": 36,
39
+ "WBf1d3": 37,
40
+ "BPf7f6": 38,
41
+ "BNf6e4": 39,
42
+ "WPb2b3": 40,
43
+ "BPb7b5": 41,
44
+ "WBc1g5": 42,
45
+ "WBc1e3": 43,
46
+ "BBc8g4": 44,
47
+ "WPf2f3": 45,
48
+ "BRf8e8": 46,
49
+ "WPe4d5": 47,
50
+ "BBf8g7": 48,
51
+ "WBf1e2": 49,
52
+ "BPf7f5": 50,
53
+ "WNf3d4": 51,
54
+ "WPb2b4": 52,
55
+ "WBc1f4": 53,
56
+ "BBc8b7": 54,
57
+ "BPc5d4": 55,
58
+ "BNc6d4": 56,
59
+ "WNc3d5": 57,
60
+ "WPg2g4": 58,
61
+ "BNf6d5": 59,
62
+ "WPa2a4": 60,
63
+ "BBf8d6": 61,
64
+ "BBc8d7": 62,
65
+ "BPa7a5": 63,
66
+ "WPh2h4": 64,
67
+ "WPd4d5": 65,
68
+ "BBc8e6": 66,
69
+ "WPd4e5": 67,
70
+ "BRa8c8": 68,
71
+ "WRa1d1": 69,
72
+ "BBf8c5": 70,
73
+ "WQd1e2": 71,
74
+ "BNc6e5": 72,
75
+ "BNg8e7": 73,
76
+ "BPh7h5": 74,
77
+ "WNc3e4": 75,
78
+ "BRa8d8": 76,
79
+ "BPe6e5": 77,
80
+ "BQd8e7": 78,
81
+ "BBc8f5": 79,
82
+ "BPe5d4": 80,
83
+ "WNf3g5": 81,
84
+ "WRa1c1": 82,
85
+ "WQd1f3": 83,
86
+ "BPd5e4": 84,
87
+ "WKe1c1": 85,
88
+ "WKg1h1": 86,
89
+ "WBf1b5": 87,
90
+ "BPg7g5": 88,
91
+ "WQd1d2": 89,
92
+ "BPc6c5": 90,
93
+ "WBc1d2": 91,
94
+ "BBf8b4": 92,
95
+ "WBf1g2": 93,
96
+ "BKg8h8": 94,
97
+ "BKg8g7": 95,
98
+ "WBc1b2": 96,
99
+ "WPc4d5": 97,
100
+ "BPe5e4": 98,
101
+ "BRa8b8": 99,
102
+ "WPh4h5": 100,
103
+ "BKe8c8": 101,
104
+ "BPe6d5": 102,
105
+ "BPd6d5": 103,
106
+ "BPd5d4": 104,
107
+ "BQd8c7": 105,
108
+ "WPc3c4": 106,
109
+ "WRa1b1": 107,
110
+ "BQd8f6": 108,
111
+ "BPb5b4": 109,
112
+ "BQd8d7": 110,
113
+ "BNf6g4": 111,
114
+ "BPd6e5": 112,
115
+ "WKg1g2": 113,
116
+ "WPf4f5": 114,
117
+ "WPg4g5": 115,
118
+ "BPc6d5": 116,
119
+ "BPb7c6": 117,
120
+ "WRf1d1": 118,
121
+ "BRf8d8": 119,
122
+ "WRa1e1": 120,
123
+ "WPe3e4": 121,
124
+ "WNg1e2": 122,
125
+ "WPc3d4": 123,
126
+ "WPb2c3": 124,
127
+ "BPa5a4": 125,
128
+ "WKg1h2": 126,
129
+ "BBg4f3": 127,
130
+ "WPh3h4": 128,
131
+ "BNd7e5": 129,
132
+ "BQd8b6": 130,
133
+ "BNf6d7": 131,
134
+ "WBg5f6": 132,
135
+ "BPc5c4": 133,
136
+ "WPd3d4": 134,
137
+ "BRa8e8": 135,
138
+ "BKg8h7": 136,
139
+ "BBe7f6": 137,
140
+ "BPg6g5": 138,
141
+ "WPc4c5": 139,
142
+ "WQd1c2": 140,
143
+ "WNd2f3": 141,
144
+ "WPa4a5": 142,
145
+ "WPb4b5": 143,
146
+ "BNd7f6": 144,
147
+ "BPh5h4": 145,
148
+ "BPa6a5": 146,
149
+ "BQd8d5": 147,
150
+ "WNc3e2": 148,
151
+ "WKg1f2": 149,
152
+ "BPg5g4": 150,
153
+ "WPg3g4": 151,
154
+ "WPf3f4": 152,
155
+ "BPf6f5": 153,
156
+ "BPh6h5": 154,
157
+ "BKg8f7": 155,
158
+ "WKg1f1": 156,
159
+ "WNc3b5": 157,
160
+ "BKg8f8": 158,
161
+ "BPd5c4": 159,
162
+ "BPf5f4": 160,
163
+ "BNf6h5": 161,
164
+ "WNd2e4": 162,
165
+ "WPd4c5": 163,
166
+ "WBe2f3": 164,
167
+ "WBb5c6": 165,
168
+ "WPf4e5": 166,
169
+ "BBb4c3": 167,
170
+ "BNc6e7": 168,
171
+ "WQd1d3": 169,
172
+ "BPb6b5": 170,
173
+ "WBc4b3": 171,
174
+ "WQd1b3": 172,
175
+ "BNc6b4": 173,
176
+ "WNf3d2": 174,
177
+ "WNf3h4": 175,
178
+ "WQd1d4": 176,
179
+ "WPa3a4": 177,
180
+ "BNe7f5": 178,
181
+ "BNd7c5": 179,
182
+ "WPb3b4": 180,
183
+ "WPg2f3": 181,
184
+ "BPg7f6": 182,
185
+ "BNc6a5": 183,
186
+ "BPf7e6": 184,
187
+ "BNe7g6": 185,
188
+ "WKc1b1": 186,
189
+ "BNd7b6": 187,
190
+ "WNe2g3": 188,
191
+ "BRf8f7": 189,
192
+ "BPe5f4": 190,
193
+ "BQd8d6": 191,
194
+ "BBg4h5": 192,
195
+ "WQd1h5": 193,
196
+ "WBg5h4": 194,
197
+ "WNd2c4": 195,
198
+ "BQd8a5": 196,
199
+ "WPe3d4": 197,
200
+ "BBd7c6": 198,
201
+ "BRh8g8": 199,
202
+ "WPe5f6": 200,
203
+ "WBd3e4": 201,
204
+ "WRf1f2": 202,
205
+ "BKc8b8": 203,
206
+ "BPf6e5": 204,
207
+ "WPe4f5": 205,
208
+ "BPh6g5": 206,
209
+ "BKe8d7": 207,
210
+ "BKe8f7": 208,
211
+ "WPe5e6": 209,
212
+ "WRd1d2": 210,
213
+ "WRd1e1": 211,
214
+ "WBe3d4": 212,
215
+ "WRa1f1": 213,
216
+ "WNd4c6": 214,
217
+ "BRf8c8": 215,
218
+ "WNc3a4": 216,
219
+ "BPf5e4": 217,
220
+ "BRa8f8": 218,
221
+ "WPf2e3": 219,
222
+ "BKe8d8": 220,
223
+ "WNd2b3": 221,
224
+ "BBe7d6": 222,
225
+ "WRh1g1": 223,
226
+ "WNe2f4": 224,
227
+ "BKe8e7": 225,
228
+ "WPd5d6": 226,
229
+ "WPh3g4": 227,
230
+ "BRd8d7": 228,
231
+ "WPd3e4": 229,
232
+ "WRe1e2": 230,
233
+ "BPa6b5": 231,
234
+ "WPh5h6": 232,
235
+ "BRd8e8": 233,
236
+ "WRe1d1": 234,
237
+ "BBd6e5": 235,
238
+ "BPa4a3": 236,
239
+ "WBf4g3": 237,
240
+ "WRf1c1": 238,
241
+ "BKf8e7": 239,
242
+ "BRe8e7": 240,
243
+ "WBc4d5": 241,
244
+ "WNe4f6": 242,
245
+ "WRh1e1": 243,
246
+ "WBf4e5": 244,
247
+ "WPa3b4": 245,
248
+ "WKf1e2": 246,
249
+ "WRd1d8": 247,
250
+ "WBd2c3": 248,
251
+ "BPh7g6": 249,
252
+ "BNe7d5": 250,
253
+ "BRd8d1": 251,
254
+ "BBc8a6": 252,
255
+ "BPd4d3": 253,
256
+ "BRh8e8": 254,
257
+ "BRh8f8": 255,
258
+ "WPa5a6": 256,
259
+ "WRe1e3": 257,
260
+ "BQd8h4": 258,
261
+ "BKe8f8": 259,
262
+ "WPf3e4": 260,
263
+ "BKg7f6": 261,
264
+ "WPa4b5": 262,
265
+ "WNe5c6": 263,
266
+ "WBe2d3": 264,
267
+ "WPe5d6": 265,
268
+ "WRf1f3": 266,
269
+ "WQd1g4": 267,
270
+ "BPe4e3": 268,
271
+ "BBc5b6": 269,
272
+ "WKe1d2": 270,
273
+ "BBe7g5": 271,
274
+ "WKe1d1": 272,
275
+ "WKe1e2": 273,
276
+ "WNe2d4": 274,
277
+ "WQd1a4": 275,
278
+ "BRe8d8": 276,
279
+ "BPa5b4": 277,
280
+ "BQd8g5": 278,
281
+ "BPe6f5": 279,
282
+ "WKg2f3": 280,
283
+ "BPf7g6": 281,
284
+ "BNb8a6": 282,
285
+ "WRd1d7": 283,
286
+ "WBc4d3": 284,
287
+ "BPb5c4": 285,
288
+ "WKf2e3": 286,
289
+ "BPh5g4": 287,
290
+ "BNg8h6": 288,
291
+ "BNe5f3": 289,
292
+ "BBh5g6": 290,
293
+ "BRe8e1": 291,
294
+ "WKh1g1": 292,
295
+ "BBg7e5": 293,
296
+ "BPh4h3": 294,
297
+ "WPd5c6": 295,
298
+ "BRf8f6": 296,
299
+ "WRe1e8": 297,
300
+ "BNd5c3": 298,
301
+ "BBe6d5": 299,
302
+ "BPd4c3": 300,
303
+ "WPh4g5": 301,
304
+ "WBg5e7": 302,
305
+ "BKf7e6": 303,
306
+ "WRh1f1": 304,
307
+ "BNe7c6": 305,
308
+ "WNe5d7": 306,
309
+ "WPf5f6": 307,
310
+ "BPg6f5": 308,
311
+ "WBh4g3": 309,
312
+ "WNg5e6": 310,
313
+ "BBg7f6": 311,
314
+ "BBf5g6": 312,
315
+ "WRe1e4": 313,
316
+ "WRe1e5": 314,
317
+ "BBe7c5": 315,
318
+ "WBd3e2": 316,
319
+ "BPb4b3": 317,
320
+ "WBd3c4": 318,
321
+ "WBe3c5": 319,
322
+ "WPc5c6": 320,
323
+ "WNb1a3": 321,
324
+ "WRe1e7": 322,
325
+ "BNe4c3": 323,
326
+ "BKh8g8": 324,
327
+ "BKg7h6": 325,
328
+ "BRe8e6": 326,
329
+ "WBc4b5": 327,
330
+ "WPa2b3": 328,
331
+ "BRd8d2": 329,
332
+ "WRd1d3": 330,
333
+ "WPh2g3": 331,
334
+ "WKh1h2": 332,
335
+ "WBd3f5": 333,
336
+ "WRe1f1": 334,
337
+ "WBc4e6": 335,
338
+ "BPb6c5": 336,
339
+ "WPb5b6": 337,
340
+ "WPg5g6": 338,
341
+ "WPb4c5": 339,
342
+ "BPc7d6": 340,
343
+ "WBf4d6": 341,
344
+ "BNe5c4": 342,
345
+ "BPc4c3": 343,
346
+ "WKe1f1": 344,
347
+ "BKe7d6": 345,
348
+ "WPb3c4": 346,
349
+ "BPf4f3": 347,
350
+ "WPh5g6": 348,
351
+ "WRh1d1": 349,
352
+ "WBd3c2": 350,
353
+ "BQd8e8": 351,
354
+ "BBf5e4": 352,
355
+ "BBc5d4": 353,
356
+ "WKh2g3": 354,
357
+ "WBb5a4": 355,
358
+ "WPg4f5": 356,
359
+ "WBe3f4": 357,
360
+ "WNe4d6": 358,
361
+ "WQf3g3": 359,
362
+ "BRh8d8": 360,
363
+ "BBf5d3": 361,
364
+ "WKg2h3": 362,
365
+ "WKe1f2": 363,
366
+ "BKf7g6": 364,
367
+ "WBc4f7": 365,
368
+ "WKg2g3": 366,
369
+ "BNd4f3": 367,
370
+ "WNd5f6": 368,
371
+ "WKe2d3": 369,
372
+ "WRd1c1": 370,
373
+ "WQd1e1": 371,
374
+ "WRd1d4": 372,
375
+ "BBb7d5": 373,
376
+ "BRe8e5": 374,
377
+ "BRd8d6": 375,
378
+ "BRe8f8": 376,
379
+ "WNg5f3": 377,
380
+ "WKf2g3": 378,
381
+ "BRc8d8": 379,
382
+ "WBe3g5": 380,
383
+ "BKf7e7": 381,
384
+ "BNe5d3": 382,
385
+ "WNf3h2": 383,
386
+ "WBb5d7": 384,
387
+ "BBd6e7": 385,
388
+ "BRe8e2": 386,
389
+ "WBe2g4": 387,
390
+ "WPc2d3": 388,
391
+ "WNg5f7": 389,
392
+ "BPg4g3": 390,
393
+ "BRd8d5": 391,
394
+ "BNh5f4": 392,
395
+ "WPf2g3": 393,
396
+ "WNd4f5": 394,
397
+ "BNd5f4": 395,
398
+ "WNe2c3": 396,
399
+ "BPe4f3": 397,
400
+ "BRd8c8": 398,
401
+ "BPa7b6": 399,
402
+ "WRd1d6": 400,
403
+ "BKf7f6": 401,
404
+ "WNb5d6": 402,
405
+ "BRd8d4": 403,
406
+ "WNe4c5": 404,
407
+ "WRc1d1": 405,
408
+ "WPd5e6": 406,
409
+ "WRe1e6": 407,
410
+ "BRc8c7": 408,
411
+ "BKg7g6": 409,
412
+ "BBd6f4": 410,
413
+ "WKc1d2": 411,
414
+ "BNg4e3": 412,
415
+ "WKf2e2": 413,
416
+ "WRd1d5": 414,
417
+ "WBc1h6": 415,
418
+ "WBc1a3": 416,
419
+ "WBg5e3": 417,
420
+ "WRd1f1": 418,
421
+ "WPg3f4": 419,
422
+ "WBe2c4": 420,
423
+ "BKh8h7": 421,
424
+ "BNe4d2": 422,
425
+ "BKh7g8": 423,
426
+ "WKh2g1": 424,
427
+ "BBb7e4": 425,
428
+ "BKh7g6": 426,
429
+ "WNe5f3": 427,
430
+ "BPa3a2": 428,
431
+ "BKe7d7": 429,
432
+ "BNa5c4": 430,
433
+ "BRc8c2": 431,
434
+ "BRf8g8": 432,
435
+ "WBf4g5": 433,
436
+ "WKe2d2": 434,
437
+ "BBc5e3": 435,
438
+ "WQe2f3": 436,
439
+ "WKf2f3": 437,
440
+ "BKc8d7": 438,
441
+ "WBe3d2": 439,
442
+ "WNe4g5": 440,
443
+ "WRf1g1": 441,
444
+ "WNd4f3": 442,
445
+ "BBd7e6": 443,
446
+ "BBd6c5": 444,
447
+ "BRd8f8": 445,
448
+ "BKf8g7": 446,
449
+ "WNg3f5": 447,
450
+ "BRf8b8": 448,
451
+ "BRd8d3": 449,
452
+ "WBg5f4": 450,
453
+ "WPa6a7": 451,
454
+ "WKh1g2": 452,
455
+ "BQd8c8": 453,
456
+ "WRc1c2": 454,
457
+ "BKh8g7": 455,
458
+ "BKf8e8": 456,
459
+ "BBg7d4": 457,
460
+ "BRe8e4": 458,
461
+ "WBd2e3": 459,
462
+ "BBe6c4": 460,
463
+ "BKe7f6": 461,
464
+ "WBd3b5": 462,
465
+ "BBb7c6": 463,
466
+ "WNg1h3": 464,
467
+ "WBd3g6": 465,
468
+ "WNd5e7": 466,
469
+ "BNb6c4": 467,
470
+ "WRc1c7": 468,
471
+ "BBg4e2": 469,
472
+ "WBf4e3": 470,
473
+ "BBd7b5": 471,
474
+ "BKd7c6": 472,
475
+ "WNd2f1": 473,
476
+ "WNg5e4": 474,
477
+ "BBg7h6": 475,
478
+ "WNh4f5": 476,
479
+ "BPg5f4": 477,
480
+ "BNf6e8": 478,
481
+ "BQf6g6": 479,
482
+ "BNe4f6": 480,
483
+ "BKf7g8": 481,
484
+ "BNd7f8": 482,
485
+ "BNg6f4": 483,
486
+ "BNf6h7": 484,
487
+ "BRf8f5": 485,
488
+ "WPc3b4": 486,
489
+ "BNc5e4": 487,
490
+ "BKf8g8": 488,
491
+ "BNg4f6": 489,
492
+ "WPf4g5": 490,
493
+ "BKg7f7": 491,
494
+ "BKe7e6": 492,
495
+ "BBb4d2": 493,
496
+ "WQd1d8": 494,
497
+ "WKf1g1": 495,
498
+ "WPc4b5": 496,
499
+ "BQd8d4": 497,
500
+ "BNb6d5": 498,
501
+ "WKe2f3": 499,
502
+ "WNb5c3": 500,
503
+ "BPd6c5": 501,
504
+ "BBe6f5": 502,
505
+ "BNd5e3": 503,
506
+ "BNb4d3": 504,
507
+ "WBe3h6": 505,
508
+ "BPf5g4": 506,
509
+ "BNg4e5": 507,
510
+ "WPe3f4": 508,
511
+ "WQd1d5": 509,
512
+ "BQe7f6": 510,
513
+ "WNd4e6": 511,
514
+ "BBb4a5": 512,
515
+ "BKf6e5": 513,
516
+ "WPd6d7": 514,
517
+ "WPh6h7": 515,
518
+ "WRf1b1": 516,
519
+ "WKf1g2": 517,
520
+ "BNf5d4": 518,
521
+ "BRb8b2": 519,
522
+ "WBg2e4": 520,
523
+ "BQd8d1": 521,
524
+ "BPc6b5": 522,
525
+ "WNg3e4": 523,
526
+ "WKe2e3": 524,
527
+ "BNd5f6": 525,
528
+ "BBc5d6": 526,
529
+ "BBg4f5": 527,
530
+ "WRf1f4": 528,
531
+ "WKf1e1": 529,
532
+ "WNe5g6": 530,
533
+ "BNd4e2": 531,
534
+ "BQb6c7": 532,
535
+ "BRc8c1": 533,
536
+ "WPd3c4": 534,
537
+ "WNc4e5": 535,
538
+ "WNe4g3": 536,
539
+ "WPc6c7": 537,
540
+ "BKf7e8": 538,
541
+ "BRe8e3": 539,
542
+ "WKg2f2": 540,
543
+ "BNb4c6": 541,
544
+ "WNa4c5": 542,
545
+ "BKd7c7": 543,
546
+ "BPg7h6": 544,
547
+ "BKg7g8": 545,
548
+ "BBe7h4": 546,
549
+ "WBa4b3": 547,
550
+ "WBh6g7": 548,
551
+ "BBf6e5": 549,
552
+ "WQd1c1": 550,
553
+ "BNg6e5": 551,
554
+ "WBb3c2": 552,
555
+ "BNe5g4": 553,
556
+ "BQd5d8": 554,
557
+ "WNb3c5": 555,
558
+ "WNb5c7": 556,
559
+ "WNd4b5": 557,
560
+ "BPd3d2": 558,
561
+ "WNe5f7": 559,
562
+ "BBb7f3": 560,
563
+ "BKc8b7": 561,
564
+ "WRe1c1": 562,
565
+ "BBg4e6": 563,
566
+ "WKf3e4": 564,
567
+ "BRc8c3": 565,
568
+ "BPc5b4": 566,
569
+ "BBg7f8": 567,
570
+ "BKg7f8": 568,
571
+ "WRb1b7": 569,
572
+ "BBc8h3": 570,
573
+ "WQe2e3": 571,
574
+ "WKc1b2": 572,
575
+ "BRe8c8": 573,
576
+ "WKf3e3": 574,
577
+ "WKd2c3": 575,
578
+ "WQd2e3": 576,
579
+ "WBg2f3": 577,
580
+ "BPf6g5": 578,
581
+ "BBc5b4": 579,
582
+ "BRb8c8": 580,
583
+ "BRa8a7": 581,
584
+ "BKf7g7": 582,
585
+ "WRf1f8": 583,
586
+ "BPg6h5": 584,
587
+ "BBe6g4": 585,
588
+ "BBe7b4": 586,
589
+ "BBc5f2": 587,
590
+ "WKd2e3": 588,
591
+ "WRf1f7": 589,
592
+ "WKg3f4": 590,
593
+ "WKh2g2": 591,
594
+ "BNc5d3": 592,
595
+ "WRb1c1": 593,
596
+ "BNd4c2": 594,
597
+ "WBb5d3": 595,
598
+ "BKd7e6": 596,
599
+ "WKh2h3": 597,
600
+ "BKe7f7": 598,
601
+ "WKh2h1": 599,
602
+ "BRc8c4": 600,
603
+ "BPb4c3": 601,
604
+ "BKf6f5": 602,
605
+ "WBd2b4": 603,
606
+ "BBd6g3": 604,
607
+ "BKd8c7": 605,
608
+ "WKg2f1": 606,
609
+ "BKg6f5": 607,
610
+ "WBc4e2": 608,
611
+ "WNd5c7": 609,
612
+ "WNf3e1": 610,
613
+ "WQe2d3": 611,
614
+ "WNe5g4": 612,
615
+ "WRc1c8": 613,
616
+ "BRb8d8": 614,
617
+ "WPg2h3": 615,
618
+ "WPf5g6": 616,
619
+ "WRd1g1": 617,
620
+ "BKg6f6": 618,
621
+ "BNf5e3": 619,
622
+ "BKd7e7": 620,
623
+ "WKd2c2": 621,
624
+ "WBb2d4": 622,
625
+ "WPb6b7": 623,
626
+ "WQe2e4": 624,
627
+ "BBb7a6": 625,
628
+ "BPh3h2": 626,
629
+ "BKc7b6": 627,
630
+ "BBb7c8": 628,
631
+ "WKg3g4": 629,
632
+ "WBe2b5": 630,
633
+ "BNb4c2": 631,
634
+ "BKh7g7": 632,
635
+ "BPb3b2": 633,
636
+ "BPc3c2": 634,
637
+ "BBe6d7": 635,
638
+ "WKf3f4": 636,
639
+ "WBb5c4": 637,
640
+ "BQc7b6": 638,
641
+ "WRf1f5": 639,
642
+ "BRf8f4": 640,
643
+ "BRc8b8": 641,
644
+ "BPh4g3": 642,
645
+ "WKe3d3": 643,
646
+ "WKe3d4": 644,
647
+ "WRf1f6": 645,
648
+ "WNb3d4": 646,
649
+ "WPe6e7": 647,
650
+ "BQb6b2": 648,
651
+ "BBf5e6": 649,
652
+ "BRc8e8": 650,
653
+ "WPf3g4": 651,
654
+ "BKf6e6": 652,
655
+ "WNc4d6": 653,
656
+ "WKc1c2": 654,
657
+ "BNd5b6": 655,
658
+ "BKd8e7": 656,
659
+ "BKf8f7": 657,
660
+ "WRc1e1": 658,
661
+ "WKg3f3": 659,
662
+ "BBf5g4": 660,
663
+ "WKg2g1": 661,
664
+ "BNe5g6": 662,
665
+ "BKe6d5": 663,
666
+ "WRc1c3": 664,
667
+ "BKd7d6": 665,
668
+ "WKd2d3": 666,
669
+ "BNd5b4": 667,
670
+ "WKf3g4": 668,
671
+ "WBd2f4": 669,
672
+ "BNh6f5": 670,
673
+ "BKh7h8": 671,
674
+ "BRc8c6": 672,
675
+ "WRb1d1": 673,
676
+ "WQd2e2": 674,
677
+ "BKh7h6": 675,
678
+ "BPe3e2": 676,
679
+ "WQe2d2": 677,
680
+ "WBe2h5": 678,
681
+ "BRb8b7": 679,
682
+ "BBf6g5": 680,
683
+ "WBg3e5": 681,
684
+ "BKc8c7": 682,
685
+ "BBd7f5": 683,
686
+ "WRa1a2": 684,
687
+ "WBb2e5": 685,
688
+ "WKg3h4": 686,
689
+ "WKf1f2": 687,
690
+ "BRf8f1": 688,
691
+ "WKe2f2": 689,
692
+ "WKd1e2": 690,
693
+ "WNc3d1": 691,
694
+ "BRf8f2": 692,
695
+ "WKc2b3": 693,
696
+ "BBc5e7": 694,
697
+ "WBg2d5": 695,
698
+ "BNe5c6": 696,
699
+ "BNh5f6": 697,
700
+ "BKe7f8": 698,
701
+ "BNe4f2": 699,
702
+ "BKe6d6": 700,
703
+ "WKe3f4": 701,
704
+ "WKd2e2": 702,
705
+ "BNe4g3": 703,
706
+ "BPe4d3": 704,
707
+ "BKg6g5": 705,
708
+ "BBd6c7": 706,
709
+ "WBb3d5": 707,
710
+ "WNe5c4": 708,
711
+ "BNg4f2": 709,
712
+ "BRf8a8": 710,
713
+ "BQe7e6": 711,
714
+ "BQc7d7": 712,
715
+ "WQd2d3": 713,
716
+ "BPa4b3": 714,
717
+ "BKf6g5": 715,
718
+ "WKf2g2": 716,
719
+ "WRc1c6": 717,
720
+ "WKf2g1": 718,
721
+ "BKd6c5": 719,
722
+ "BQe7d7": 720,
723
+ "WKe3f3": 721,
724
+ "BKe6f5": 722,
725
+ "BBb4d6": 723,
726
+ "BBf6d4": 724,
727
+ "BKf6g6": 725,
728
+ "BRg8g7": 726,
729
+ "WNh2g4": 727,
730
+ "WKe3e4": 728,
731
+ "WKf3e2": 729,
732
+ "BKf6e7": 730,
733
+ "BNc6d8": 731,
734
+ "BPa2a1": 732,
735
+ "BRa8g8": 733,
736
+ "WBe3f2": 734,
737
+ "WPg4h5": 735,
738
+ "BBb4c5": 736,
739
+ "BRf8f3": 737,
740
+ "WQb3c2": 738,
741
+ "BNh5g3": 739,
742
+ "BQc7e5": 740,
743
+ "WBd3h7": 741,
744
+ "WBd2g5": 742,
745
+ "WQd2f4": 743,
746
+ "BQd7e7": 744,
747
+ "BBf8h6": 745,
748
+ "BRd8g8": 746,
749
+ "WRf1a1": 747,
750
+ "WNh4g6": 748,
751
+ "BBg7c3": 749,
752
+ "WQf3e2": 750,
753
+ "BKe7d8": 751,
754
+ "WBc4a2": 752,
755
+ "WKd1c2": 753,
756
+ "WBb2f6": 754,
757
+ "BNe4c5": 755,
758
+ "BNe4g5": 756,
759
+ "WKg3f2": 757,
760
+ "WPg6g7": 758,
761
+ "WBf3e4": 759,
762
+ "WRc1b1": 760,
763
+ "WNf4d5": 761,
764
+ "BBe7f8": 762,
765
+ "BPb5a4": 763,
766
+ "WRa1g1": 764,
767
+ "WKd3c4": 765,
768
+ "WBe3b6": 766,
769
+ "BBa5b6": 767,
770
+ "BPd7c6": 768,
771
+ "WRd1b1": 769,
772
+ "BQe7d6": 770,
773
+ "BRf8h8": 771,
774
+ "BPe7f6": 772,
775
+ "BBd6b4": 773,
776
+ "WQd2c3": 774,
777
+ "WBg2h3": 775,
778
+ "WQe2c4": 776,
779
+ "WKf3g3": 777,
780
+ "BNe4d6": 778,
781
+ "BNa6c5": 779,
782
+ "BQd5a5": 780,
783
+ "WBb2c3": 781,
784
+ "WNe5d3": 782,
785
+ "WQc2d2": 783,
786
+ "WQf3e3": 784,
787
+ "BKg6f7": 785,
788
+ "WBg5h6": 786,
789
+ "BQd7e6": 787,
790
+ "BKg6h5": 788,
791
+ "WNd4b3": 789,
792
+ "BQa5b6": 790,
793
+ "WKe2f1": 791,
794
+ "BKd8c8": 792,
795
+ "BBd7g4": 793,
796
+ "BKe6f6": 794,
797
+ "WKf2e1": 795,
798
+ "BRd8b8": 796,
799
+ "WNg3h5": 797,
800
+ "BKe6e5": 798,
801
+ "BKd6c6": 799,
802
+ "BPg5h4": 800,
803
+ "BBf6e7": 801,
804
+ "WQc2d3": 802,
805
+ "BQa5c7": 803,
806
+ "BPd4e3": 804,
807
+ "BQe7c5": 805,
808
+ "WRc1c5": 806,
809
+ "WPa7a8": 807,
810
+ "WPf5e6": 808,
811
+ "BQe7e5": 809,
812
+ "WNb5d4": 810,
813
+ "WBb2a3": 811,
814
+ "WKe2d1": 812,
815
+ "BNa5c6": 813,
816
+ "WRg1g2": 814,
817
+ "BQc7c6": 815,
818
+ "WPg5f6": 816,
819
+ "BKg7h7": 817,
820
+ "WPf6f7": 818,
821
+ "BNc6b8": 819,
822
+ "WQe2f2": 820,
823
+ "WNf4e6": 821,
824
+ "BQd7d6": 822,
825
+ "BRc8c5": 823,
826
+ "BQe7g5": 824,
827
+ "WBf3g4": 825,
828
+ "BQc7d6": 826,
829
+ "WQe2g4": 827,
830
+ "WPb5c6": 828,
831
+ "WNa3c4": 829,
832
+ "WPb4a5": 830,
833
+ "WQb3b7": 831,
834
+ "WRb1b2": 832,
835
+ "WNc4e3": 833,
836
+ "BBf6g7": 834,
837
+ "WRa1a7": 835,
838
+ "WBf3e2": 836,
839
+ "WQc2b3": 837,
840
+ "WQf3f4": 838,
841
+ "BNc5e6": 839,
842
+ "BRa8a6": 840,
843
+ "BPf3f2": 841,
844
+ "BNb6d7": 842,
845
+ "BQe7f7": 843,
846
+ "BKd7e8": 844,
847
+ "BQd7c6": 845,
848
+ "BBb4e7": 846,
849
+ "BRb8e8": 847,
850
+ "BPg3g2": 848,
851
+ "BKe6d7": 849,
852
+ "WNh4f3": 850,
853
+ "WKe3d2": 851,
854
+ "WNf5e7": 852,
855
+ "WBf3d5": 853,
856
+ "WKc1d1": 854,
857
+ "WQd2h6": 855,
858
+ "WPg3h4": 856,
859
+ "BRa8a2": 857,
860
+ "BKf6g7": 858,
861
+ "BKc8d8": 859,
862
+ "WQh5f3": 860,
863
+ "BKh6g7": 861,
864
+ "BBg6e4": 862,
865
+ "WKh3g2": 863,
866
+ "BKg7h8": 864,
867
+ "BNb4d5": 865,
868
+ "BBe7d8": 866,
869
+ "BBg4d7": 867,
870
+ "BKh6h5": 868,
871
+ "BBd7e8": 869,
872
+ "WNe4c3": 870,
873
+ "WQd2g5": 871,
874
+ "WKb1a1": 872,
875
+ "BRb8a8": 873,
876
+ "BQc7e7": 874,
877
+ "WBg2f1": 875,
878
+ "BNd4f5": 876,
879
+ "WPa5b6": 877,
880
+ "WKd3c3": 878,
881
+ "WRf1h1": 879,
882
+ "WRf3g3": 880,
883
+ "WQf3e4": 881,
884
+ "WKd3e3": 882,
885
+ "BRb2a2": 883,
886
+ "WQc2e4": 884,
887
+ "BNf5h4": 885,
888
+ "BKd7c8": 886,
889
+ "WRb1e1": 887,
890
+ "BPf4g3": 888,
891
+ "WBf1h3": 889,
892
+ "BNc4e3": 890,
893
+ "WNf1g3": 891,
894
+ "WKg2h2": 892,
895
+ "BNf6g8": 893,
896
+ "WRc1c4": 894,
897
+ "WKf3g2": 895,
898
+ "WKd3d4": 896,
899
+ "BQd7f5": 897,
900
+ "WQf3f6": 898,
901
+ "WPc5b6": 899,
902
+ "BKd8d7": 900,
903
+ "WKh3h4": 901,
904
+ "BQf6e5": 902,
905
+ "BKh6g5": 903,
906
+ "BKc7d6": 904,
907
+ "BNd4c6": 905,
908
+ "BRa8a1": 906,
909
+ "BQf6e7": 907,
910
+ "WKd1c1": 908,
911
+ "WRb7a7": 909,
912
+ "WQd3e3": 910,
913
+ "BBg7b2": 911,
914
+ "WBf4c7": 912,
915
+ "BRh8c8": 913,
916
+ "WKd2c1": 914,
917
+ "BKe7e8": 915,
918
+ "BKb8a8": 916,
919
+ "WKh3g4": 917,
920
+ "WNc7a8": 918,
921
+ "BNc2a1": 919,
922
+ "WNd5f4": 920,
923
+ "WNc5e6": 921,
924
+ "BQb6d4": 922,
925
+ "BNg6h4": 923,
926
+ "BKf7f8": 924,
927
+ "BPc4b3": 925,
928
+ "WQf3h5": 926,
929
+ "BRh8h7": 927,
930
+ "BBf5c2": 928,
931
+ "WBf4d2": 929,
932
+ "WPh7h8": 930,
933
+ "WQf3g4": 931,
934
+ "WPb3a4": 932,
935
+ "WQd3d2": 933,
936
+ "BNa5b3": 934,
937
+ "WBb2c1": 935,
938
+ "WQd3e4": 936,
939
+ "WRd1h1": 937,
940
+ "BKd8e8": 938,
941
+ "BKe6f7": 939,
942
+ "WKb1a2": 940,
943
+ "WKd3e4": 941,
944
+ "WNg3e2": 942,
945
+ "BBc6d5": 943,
946
+ "WPc7c8": 944,
947
+ "WKc2d3": 945,
948
+ "BNf4d3": 946,
949
+ "WNd5e3": 947,
950
+ "WQc2e2": 948,
951
+ "BQe7b4": 949,
952
+ "BNf4e2": 950,
953
+ "WKg4g5": 951,
954
+ "WQd3e2": 952,
955
+ "WRa1a8": 953,
956
+ "BNa6b4": 954,
957
+ "WNf5d6": 955,
958
+ "BRa2a3": 956,
959
+ "BQd8f8": 957,
960
+ "BRc8a8": 958,
961
+ "BKd6d5": 959,
962
+ "BNg6e7": 960,
963
+ "WQe2h5": 961,
964
+ "BRc2b2": 962,
965
+ "BKd6e5": 963,
966
+ "BKd6e6": 964,
967
+ "WBc2b3": 965,
968
+ "WRb1b3": 966,
969
+ "WPc2b3": 967,
970
+ "WBe2d1": 968,
971
+ "BRc8f8": 969,
972
+ "BKf5e4": 970,
973
+ "WQe2b5": 971,
974
+ "BKe5d4": 972,
975
+ "WRb1a1": 973,
976
+ "WBb5e2": 974,
977
+ "WBg5d2": 975,
978
+ "WRe1b1": 976,
979
+ "BRb8b6": 977,
980
+ "WNe3d5": 978,
981
+ "BKf5f4": 979,
982
+ "BKc7c6": 980,
983
+ "WQd2c2": 981,
984
+ "BNa6c7": 982,
985
+ "BQf6f5": 983,
986
+ "BQd7c7": 984,
987
+ "WBe5f6": 985,
988
+ "BQe7h4": 986,
989
+ "WKb1c1": 987,
990
+ "BNe5d7": 988,
991
+ "BNd4e6": 989,
992
+ "WKd2e1": 990,
993
+ "BNh7g5": 991,
994
+ "WPb7b8": 992,
995
+ "WRa1a3": 993,
996
+ "BNd5e7": 994,
997
+ "WBe2f1": 995,
998
+ "WRc7b7": 996,
999
+ "BRd8h8": 997,
1000
+ "BKd6c7": 998,
1001
+ "WPb2a3": 999,
1002
+ "BPb2b1": 1000,
1003
+ "WBf4h6": 1001,
1004
+ "WPc5d6": 1002,
1005
+ "WKf2f1": 1003,
1006
+ "BKc6b6": 1004,
1007
+ "WNf4h5": 1005,
1008
+ "WKb1c2": 1006,
1009
+ "BBe6h3": 1007,
1010
+ "WRe1g1": 1008,
1011
+ "WKe3f2": 1009,
1012
+ "WNh3f4": 1010,
1013
+ "BQc7d8": 1011,
1014
+ "WNa4c3": 1012,
1015
+ "WKf4g5": 1013,
1016
+ "BBb6d4": 1014,
1017
+ "BQf6f3": 1015,
1018
+ "WKf4f5": 1016,
1019
+ "BNh6g4": 1017,
1020
+ "BKb8a7": 1018,
1021
+ "BPc4d3": 1019,
1022
+ "WRh1h2": 1020,
1023
+ "WRh1h3": 1021,
1024
+ "BQf6g5": 1022,
1025
+ "BBd6h2": 1023,
1026
+ "WQc2c3": 1024,
1027
+ "BBc6b5": 1025,
1028
+ "BQd5e6": 1026,
1029
+ "WNg5h3": 1027,
1030
+ "WRh1c1": 1028,
1031
+ "WNe3f5": 1029,
1032
+ "WKd1d2": 1030,
1033
+ "BKc7d7": 1031,
1034
+ "BNg4h6": 1032,
1035
+ "WQd2d4": 1033,
1036
+ "BNf4h3": 1034,
1037
+ "BPh2h1": 1035,
1038
+ "WBd3b1": 1036,
1039
+ "WQd3c4": 1037,
1040
+ "BRa2a1": 1038,
1041
+ "BBe6b3": 1039,
1042
+ "BKd6e7": 1040,
1043
+ "WKd1e1": 1041,
1044
+ "WRc1f1": 1042,
1045
+ "BKg5g4": 1043,
1046
+ "WKf4e5": 1044,
1047
+ "BQd6e5": 1045,
1048
+ "WNb3d2": 1046,
1049
+ "BQd7g4": 1047,
1050
+ "WQf3f7": 1048,
1051
+ "BQe7d8": 1049,
1052
+ "BPg4f3": 1050,
1053
+ "BRd8a8": 1051,
1054
+ "WNf5h6": 1052,
1055
+ "WPg5h6": 1053,
1056
+ "WRc1a1": 1054,
1057
+ "BBc6e4": 1055,
1058
+ "WNh3g5": 1056,
1059
+ "WQd3d4": 1057,
1060
+ "WKd3c2": 1058,
1061
+ "WQd4d1": 1059,
1062
+ "WQf3d1": 1060,
1063
+ "WBg2c6": 1061,
1064
+ "BQb2c3": 1062,
1065
+ "BBb7g2": 1063,
1066
+ "BBe6f7": 1064,
1067
+ "BNc4e5": 1065,
1068
+ "WBc2e4": 1066,
1069
+ "BBe5d4": 1067,
1070
+ "WBh4f6": 1068,
1071
+ "BKc6c5": 1069,
1072
+ "WKc2d2": 1070,
1073
+ "WRa1a6": 1071,
1074
+ "BKb8c7": 1072,
1075
+ "WQf3d5": 1073,
1076
+ "BPc7b6": 1074,
1077
+ "BRh8h6": 1075,
1078
+ "BNf5d6": 1076,
1079
+ "BNd4b3": 1077,
1080
+ "WNd4e2": 1078,
1081
+ "WNe6f8": 1079,
1082
+ "BQc7b7": 1080,
1083
+ "BRe8g8": 1081,
1084
+ "WKe4d5": 1082,
1085
+ "WKf4e4": 1083,
1086
+ "WBe3a7": 1084,
1087
+ "WKg2h1": 1085,
1088
+ "BKc7b7": 1086,
1089
+ "WNc3b1": 1087,
1090
+ "BKc6b5": 1088,
1091
+ "WKd4c5": 1089,
1092
+ "BRg8f8": 1090,
1093
+ "WQd3f3": 1091,
1094
+ "WQe2e5": 1092,
1095
+ "BNf5g3": 1093,
1096
+ "WBd4e3": 1094,
1097
+ "WBd5c6": 1095,
1098
+ "BQb6c6": 1096,
1099
+ "BNe6d4": 1097,
1100
+ "BBd7a4": 1098,
1101
+ "WRa7a6": 1099,
1102
+ "BRc2a2": 1100,
1103
+ "BRb2b3": 1101,
1104
+ "BKf6f7": 1102,
1105
+ "WBb3c4": 1103,
1106
+ "BKe5e4": 1104,
1107
+ "WBh6g5": 1105,
1108
+ "WNd5c3": 1106,
1109
+ "BKd5c4": 1107,
1110
+ "WNc5d7": 1108,
1111
+ "BPb7a6": 1109,
1112
+ "BNd6e4": 1110,
1113
+ "BKg5f4": 1111,
1114
+ "BBa6b7": 1112,
1115
+ "WKe4e5": 1113,
1116
+ "BKb8c8": 1114,
1117
+ "BKf5g4": 1115,
1118
+ "BQd6d7": 1116,
1119
+ "WRg1g7": 1117,
1120
+ "WBe4d5": 1118,
1121
+ "BRg8g6": 1119,
1122
+ "WNa3b5": 1120,
1123
+ "BKb7b6": 1121,
1124
+ "WKd3e2": 1122,
1125
+ "BQb6a5": 1123,
1126
+ "BPb6a5": 1124,
1127
+ "WBg3d6": 1125,
1128
+ "WBd2e1": 1126,
1129
+ "BBg4h3": 1127,
1130
+ "WNd6b7": 1128,
1131
+ "BQd6e7": 1129,
1132
+ "WNa4b6": 1130,
1133
+ "WQf3d3": 1131,
1134
+ "BNc5b3": 1132,
1135
+ "BNe6f4": 1133,
1136
+ "WKg3g2": 1134,
1137
+ "BPc2c1": 1135,
1138
+ "BBh3g2": 1136,
1139
+ "WQh5h6": 1137,
1140
+ "BNc5d7": 1138,
1141
+ "WRg1f1": 1139,
1142
+ "WKf4e3": 1140,
1143
+ "WKe2e1": 1141,
1144
+ "BBc6d7": 1142,
1145
+ "BBa6c4": 1143,
1146
+ "BBc5a7": 1144,
1147
+ "WNe3g4": 1145,
1148
+ "WKc2c3": 1146,
1149
+ "BKf5e5": 1147,
1150
+ "BRe8b8": 1148,
1151
+ "BKe6e7": 1149,
1152
+ "WKc3c4": 1150,
1153
+ "BQf6d4": 1151,
1154
+ "WNd5b6": 1152,
1155
+ "BRb8b4": 1153,
1156
+ "BBh5f3": 1154,
1157
+ "WRd1a1": 1155,
1158
+ "WPg7g8": 1156,
1159
+ "WKc3b4": 1157,
1160
+ "BBe4f3": 1158,
1161
+ "WNg4f6": 1159,
1162
+ "WBg5d8": 1160,
1163
+ "BRa8a3": 1161,
1164
+ "WRa7a8": 1162,
1165
+ "WBf4h2": 1163,
1166
+ "WRe3f3": 1164,
1167
+ "WRh1h5": 1165,
1168
+ "BRc3c2": 1166,
1169
+ "WRa1a4": 1167,
1170
+ "WRb7b6": 1168,
1171
+ "WKe4f5": 1169,
1172
+ "BRa8a5": 1170,
1173
+ "BNf8g6": 1171,
1174
+ "BNc4b2": 1172,
1175
+ "BRb8b5": 1173,
1176
+ "WRg1g3": 1174,
1177
+ "WKg4f3": 1175,
1178
+ "BQd6d5": 1176,
1179
+ "BKb8b7": 1177,
1180
+ "BKe5d5": 1178,
1181
+ "BBf6b2": 1179,
1182
+ "WRe1a1": 1180,
1183
+ "BKb7a6": 1181,
1184
+ "WBb2g7": 1182,
1185
+ "WNc6e7": 1183,
1186
+ "BNc6a7": 1184,
1187
+ "WBe4f3": 1185,
1188
+ "WQd4e3": 1186,
1189
+ "BRa8a4": 1187,
1190
+ "WBd3a6": 1188,
1191
+ "WPd7d8": 1189,
1192
+ "BKf5e6": 1190,
1193
+ "BRa3a2": 1191,
1194
+ "BQd7d5": 1192,
1195
+ "WKe3e2": 1193,
1196
+ "BBf6c3": 1194,
1197
+ "BNf5e7": 1195,
1198
+ "WKg3h2": 1196,
1199
+ "WRg7h7": 1197,
1200
+ "BQb6d8": 1198,
1201
+ "WQd1a1": 1199
1202
+ }