ilanou20 committed on
Commit ec70582 · verified · 1 Parent(s): d5cfd16

Chess Challenge submission by ilanou20

Files changed (8)
  1. README.md +31 -0
  2. config.json +24 -0
  3. model.py +427 -0
  4. model.safetensors +3 -0
  5. tokenizer.py +468 -0
  6. tokenizer_config.json +14 -0
  7. training_args.bin +3 -0
  8. vocab.json +114 -0
README.md ADDED
@@ -0,0 +1,31 @@
+ ---
+ library_name: transformers
+ tags:
+ - chess
+ - llm-course
+ - chess-challenge
+ license: mit
+ ---
+
+ # chess-ilan-v10
+
+ Chess model submitted to the LLM Course Chess Challenge.
+
+ ## Submission Info
+
+ - **Submitted by**: [ilanou20](https://huggingface.co/ilanou20)
+ - **Parameters**: 874,240
+ - **Organization**: LLM-course
+
+ ## Usage
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model = AutoModelForCausalLM.from_pretrained("LLM-course/chess-ilan-v10", trust_remote_code=True)
+ tokenizer = AutoTokenizer.from_pretrained("LLM-course/chess-ilan-v10", trust_remote_code=True)
+ ```
+
+ ## Evaluation
+
+ This model is evaluated at the [Chess Challenge Arena](https://huggingface.co/spaces/LLM-course/Chess1MChallenge).
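A minimal sketch of how the loaded model and tokenizer might be used together to sample a next move, assuming the move-string format described in `tokenizer.py` (color prefix, piece letter, source square, destination square) and the `generate_move` helper defined in `model.py`; the example game string, temperature, and top-k values are illustrative, not the format guaranteed by the arena.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "LLM-course/chess-ilan-v10"
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)

# Hypothetical opening in the tokenizer's move format:
# White pawn e2 -> e4, then Black pawn e7 -> e5.
game = "WPe2e4 BPe7e5"
input_ids = torch.tensor([tokenizer.encode(game)])

# Sample one next token with the custom helper from model.py.
next_id = model.generate_move(input_ids, temperature=0.8, top_k=10)
print(tokenizer.decode([next_id]))
```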
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "ChessForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "dropout": 0.0,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "chess_transformer",
+   "n_ctx": 256,
+   "n_embd": 128,
+   "n_head": 4,
+   "n_inner": 384,
+   "n_layer": 5,
+   "pad_token_id": 0,
+   "tie_weights": true,
+   "transformers_version": "4.57.6",
+   "vocab_size": 112,
+   "auto_map": {
+     "AutoConfig": "model.ChessConfig",
+     "AutoModelForCausalLM": "model.ChessForCausalLM"
+   }
+ }
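For reference, the README's parameter count of 874,240 follows from these values: token and position embeddings, five pre-norm blocks (QKV and output projections, two-layer MLP, two LayerNorms each), a final LayerNorm, and a weight-tied LM head. A quick back-of-the-envelope check:

```python
# Parameter count implied by config.json (lm_head is tied to the token embedding).
vocab_size, n_embd, n_ctx, n_inner, n_layer = 112, 128, 256, 384, 5

embeddings = vocab_size * n_embd + n_ctx * n_embd                        # wte + wpe
attn = (n_embd * 3 * n_embd + 3 * n_embd) + (n_embd * n_embd + n_embd)   # c_attn + c_proj
mlp = (n_embd * n_inner + n_inner) + (n_inner * n_embd + n_embd)         # c_fc + c_proj
norms = 2 * 2 * n_embd                                                   # ln_1 + ln_2
per_layer = attn + mlp + norms

total = embeddings + n_layer * per_layer + 2 * n_embd                    # + final ln_f
print(total)  # 874240, matching the README
```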
model.py ADDED
@@ -0,0 +1,427 @@
+ from __future__ import annotations
+
+ import math
+ from dataclasses import dataclass
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PretrainedConfig, PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class ChessConfig(PretrainedConfig):
+     """
+     Configuration class for the Chess Transformer model.
+
+     This configuration is designed for a ~1M parameter model.
+     Students can adjust these values to explore different architectures.
+
+     Parameter budget breakdown (with default values):
+     - Embeddings (vocab): 1200 x 128 = 153,600
+     - Position Embeddings: 256 x 128 = 32,768
+     - Transformer Layers: 6 x ~120,000 = ~720,000
+     - LM Head (with weight tying): 0 (shared with embeddings)
+     - Total: ~906,000 parameters
+
+     Attributes:
+         vocab_size: Size of the vocabulary (number of unique moves).
+         n_embd: Embedding dimension (d_model).
+         n_layer: Number of transformer layers.
+         n_head: Number of attention heads.
+         n_ctx: Maximum sequence length (context window).
+         n_inner: Feed-forward inner dimension (default: 3 * n_embd).
+         dropout: Dropout probability.
+         layer_norm_epsilon: Epsilon for layer normalization.
+         tie_weights: Whether to tie embedding and output weights.
+     """
+
+     model_type = "chess_transformer"
+
+     def __init__(
+         self,
+         vocab_size: int = 1200,
+         n_embd: int = 128,
+         n_layer: int = 6,
+         n_head: int = 4,
+         n_ctx: int = 256,
+         n_inner: Optional[int] = None,
+         dropout: float = 0.1,
+         layer_norm_epsilon: float = 1e-5,
+         tie_weights: bool = True,
+         pad_token_id: int = 0,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
+
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_ctx = n_ctx
+         self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
+         self.dropout = dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.tie_weights = tie_weights
+         # Inform the HF base class about the tying behavior
+         self.tie_word_embeddings = bool(tie_weights)
+
+
+ class MultiHeadAttention(nn.Module):
+     """
+     Multi-head self-attention module.
+
+     This is a standard scaled dot-product attention implementation
+     with causal masking for autoregressive generation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         assert config.n_embd % config.n_head == 0, \
+             f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
+
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.head_dim = config.n_embd // config.n_head
+
+         # Combined QKV projection for efficiency
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+
+         self.dropout = nn.Dropout(config.dropout)
+
+         # Causal mask buffer (sliced to the current sequence length in forward)
+         self.register_buffer(
+             "bias",
+             torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
+                 1, 1, config.n_ctx, config.n_ctx
+             ),
+             persistent=False,
+         )
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         batch_size, seq_len, _ = x.size()
+
+         # Compute Q, K, V
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+
+         # Reshape for multi-head attention
+         q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+
+         # Scaled dot-product attention
+         attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+
+         # Apply causal mask
+         causal_mask = self.bias[:, :, :seq_len, :seq_len]
+         attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))
+
+         # Apply attention mask (for padding)
+         if attention_mask is not None:
+             # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
+             attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+             attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))
+
+         attn_weights = F.softmax(attn_weights, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         # Apply attention to values
+         attn_output = torch.matmul(attn_weights, v)
+
+         # Reshape back
+         attn_output = attn_output.transpose(1, 2).contiguous().view(
+             batch_size, seq_len, self.n_embd
+         )
+
+         # Output projection
+         attn_output = self.c_proj(attn_output)
+
+         return attn_output
+
+
+ class FeedForward(nn.Module):
+     """
+     Feed-forward network (MLP) module.
+
+     Standard two-layer MLP with GELU activation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.c_fc = nn.Linear(config.n_embd, config.n_inner)
+         self.c_proj = nn.Linear(config.n_inner, config.n_embd)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.c_fc(x)
+         x = F.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     """
+     A single transformer block with attention and feed-forward layers.
+
+     Uses pre-normalization (LayerNorm before attention/FFN) for better
+     training stability.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.attn = MultiHeadAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.mlp = FeedForward(config)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         # Pre-norm attention
+         x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
+         # Pre-norm FFN
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class ChessForCausalLM(PreTrainedModel):
+     """
+     Chess Transformer for Causal Language Modeling (next-move prediction).
+
+     This model is designed to predict the next chess move given a sequence
+     of previous moves. It uses a GPT-style architecture with:
+     - Token embeddings for chess moves
+     - Learned positional embeddings
+     - Stacked transformer blocks
+     - Linear head for next-token prediction
+
+     The model supports weight tying between the embedding layer and the
+     output projection to save parameters.
+
+     Example:
+         >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
+         >>> model = ChessForCausalLM(config)
+         >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
+         >>> outputs = model(**inputs)
+         >>> next_move_logits = outputs.logits[:, -1, :]
+     """
+
+     config_class = ChessConfig
+     base_model_prefix = "transformer"
+     supports_gradient_checkpointing = True
+     # Suppress the missing-key warning for the tied lm_head when loading
+     _keys_to_ignore_on_load_missing = ["lm_head.weight"]
+
+     def __init__(self, config: ChessConfig):
+         super().__init__(config)
+
+         # Token and position embeddings
+         self.wte = nn.Embedding(config.vocab_size, config.n_embd)
+         self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
+
+         self.drop = nn.Dropout(config.dropout)
+
+         # Transformer blocks
+         self.h = nn.ModuleList([
+             TransformerBlock(config) for _ in range(config.n_layer)
+         ])
+
+         # Final layer norm
+         self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+
+         # Output head
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # Declare tied weights for proper serialization
+         if config.tie_weights:
+             self._tied_weights_keys = ["lm_head.weight"]
+
+         # Initialize weights
+         self.post_init()
+
+         # Tie weights if configured
+         if config.tie_weights:
+             self.tie_weights()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings: nn.Module):
+         self.wte = new_embeddings
+         if getattr(self.config, "tie_weights", False):
+             self.tie_weights()
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings: nn.Module):
+         self.lm_head = new_embeddings
+
+     def tie_weights(self):
+         # Use the HF helper to tie or clone depending on config
+         if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
+             self._tie_or_clone_weights(self.lm_head, self.wte)
+
+     def _init_weights(self, module: nn.Module):
+         """Initialize weights following GPT-2 style."""
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+         elif isinstance(module, nn.LayerNorm):
+             torch.nn.init.ones_(module.weight)
+             torch.nn.init.zeros_(module.bias)
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         """
+         Forward pass of the model.
+
+         Args:
+             input_ids: Token IDs of shape (batch_size, seq_len).
+             attention_mask: Attention mask of shape (batch_size, seq_len).
+             position_ids: Position IDs of shape (batch_size, seq_len).
+             labels: Labels for the language modeling loss.
+             return_dict: Whether to return a ModelOutput object.
+
+         Returns:
+             CausalLMOutputWithPast containing loss (if labels provided) and logits.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         batch_size, seq_len = input_ids.size()
+         device = input_ids.device
+
+         # Create position IDs if not provided
+         if position_ids is None:
+             position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)
+
+         # Get embeddings
+         token_embeds = self.wte(input_ids)
+         position_embeds = self.wpe(position_ids)
+         hidden_states = self.drop(token_embeds + position_embeds)
+
+         # Pass through transformer blocks
+         for block in self.h:
+             hidden_states = block(hidden_states, attention_mask=attention_mask)
+
+         # Final layer norm
+         hidden_states = self.ln_f(hidden_states)
+
+         # Get logits
+         logits = self.lm_head(hidden_states)
+
+         # Compute loss if labels are provided
+         loss = None
+         if labels is not None:
+             # Shift logits and labels for next-token prediction
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+
+             # Flatten for cross-entropy
+             loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+             # loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
+             loss = loss_fct(
+                 shift_logits.view(-1, shift_logits.size(-1)),
+                 shift_labels.view(-1),
+             )
+
+         if not return_dict:
+             output = (logits,)
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+             hidden_states=None,
+             attentions=None,
+         )
+
+     @torch.no_grad()
+     def generate_move(
+         self,
+         input_ids: torch.LongTensor,
+         temperature: float = 1.0,
+         top_k: Optional[int] = None,
+         top_p: Optional[float] = None,
+     ) -> int:
+         """
+         Generate the next move given a sequence of moves.
+
+         Args:
+             input_ids: Token IDs of shape (1, seq_len).
+             temperature: Sampling temperature (1.0 = no change).
+             top_k: If set, only sample from the top k tokens.
+             top_p: If set, use nucleus sampling with this threshold.
+
+         Returns:
+             The token ID of the predicted next move.
+         """
+         self.eval()
+
+         # Get logits for the last position
+         outputs = self(input_ids)
+         logits = outputs.logits[:, -1, :] / temperature
+
+         # Apply top-k filtering
+         if top_k is not None:
+             indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+             logits[indices_to_remove] = float("-inf")
+
+         # Apply top-p (nucleus) filtering
+         if top_p is not None:
+             sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+             cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+             # Remove tokens with cumulative probability above the threshold
+             sorted_indices_to_remove = cumulative_probs > top_p
+             sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+             sorted_indices_to_remove[..., 0] = 0
+
+             indices_to_remove = sorted_indices_to_remove.scatter(
+                 dim=-1, index=sorted_indices, src=sorted_indices_to_remove
+             )
+             logits[indices_to_remove] = float("-inf")
+
+         # Sample from the distribution
+         probs = F.softmax(logits, dim=-1)
+         next_token = torch.multinomial(probs, num_samples=1)
+
+         return next_token.item()
+
+
+ # Register the model with Auto classes for easy loading
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("chess_transformer", ChessConfig)
+ AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
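A small local smoke test for the forward pass documented above, assuming `model.py` is importable from a clone of this repo; the hyperparameters mirror `config.json`, and the shapes and parameter count are what the code above implies.

```python
import torch
from model import ChessConfig, ChessForCausalLM  # assumes a local clone of this repo

config = ChessConfig(vocab_size=112, n_embd=128, n_layer=5, n_head=4,
                     n_ctx=256, n_inner=384, dropout=0.0)
net = ChessForCausalLM(config)

input_ids = torch.randint(5, config.vocab_size, (2, 16))  # two fake 16-token games
out = net(input_ids, labels=input_ids)

print(out.logits.shape)                           # torch.Size([2, 16, 112])
print(out.loss.item())                            # cross-entropy on shifted targets
print(sum(p.numel() for p in net.parameters()))   # 874240 with the tied lm_head
```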
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e498de0362ea282cd179dc717f24924c78124cea0d8843a444d88a7f276a5600
+ size 3502384
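As a rough consistency check, assuming float32 storage: the 874,240 parameters account for almost all of the reported file size, with the small remainder presumably being the safetensors header.

```python
params = 874_240
print(params * 4)              # 3496960 bytes of float32 tensor data
print(3_502_384 - params * 4)  # ~5.4 kB left over, presumably header/metadata
```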
tokenizer.py ADDED
@@ -0,0 +1,468 @@
+ """
+ Custom Chess Tokenizer for the Chess Challenge.
+ We build a vocabulary with:
+ - W/B prefix for White/Black
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
+ - Source square (file and rank), e.g. e2
+ - Destination square (file and rank), e.g. e4
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
+ """
+
+ from __future__ import annotations
+
+ import json
+ import os
+ from pathlib import Path
+ import shutil
+ import inspect
+ from typing import Dict, List, Optional
+
+ from transformers import PreTrainedTokenizer
+ from datasets import load_dataset
+
+
+ class ChessTokenizer(PreTrainedTokenizer):
+
+     model_input_names = ["input_ids", "attention_mask"]
+     vocab_files_names = {"vocab_file": "vocab.json"}
+
+     # Special tokens
+     PAD_TOKEN = "[PAD]"
+     BOS_TOKEN = "[BOS]"
+     EOS_TOKEN = "[EOS]"
+     UNK_TOKEN = "[UNK]"
+     SEP_TOKEN = "[SEP]"
+
+     def __init__(
+         self,
+         vocab_file: Optional[str] = None,
+         vocab: Optional[Dict[str, int]] = None,
+         **kwargs,
+     ):
+
+         self._pad_token = self.PAD_TOKEN
+         self._bos_token = self.BOS_TOKEN
+         self._eos_token = self.EOS_TOKEN
+         self._unk_token = self.UNK_TOKEN
+         self._sep_token = self.SEP_TOKEN
+
+         kwargs.pop("pad_token", None)
+         kwargs.pop("bos_token", None)
+         kwargs.pop("eos_token", None)
+         kwargs.pop("unk_token", None)
+         kwargs.pop("sep_token", None)
+
+         print("Initializing ChessTokenizer")
+         print(f"  vocab_file: {vocab_file}")
+         print(f"  vocab provided: {vocab is not None}")
+         print(f"  vocab: {vocab}")
+
+         print(os.listdir("."))
+
+         # Hardcoded vocabulary: this always takes precedence over any
+         # `vocab` or `vocab_file` argument passed in.
+         vocab = {
+             "[PAD]": 0,
+             "[BOS]": 1,
+             "[EOS]": 2,
+             "[UNK]": 3,
+             "[SEP]": 4,
+             "(+)": 5,
+             "(+*)": 6,
+             "(+*B)": 7,
+             "(+*N)": 8,
+             "(+*Q)": 9,
+             "(+*R)": 10,
+             "(+B)": 11,
+             "(+N)": 12,
+             "(+Q)": 13,
+             "(+R)": 14,
+             "(B)": 15,
+             "(N)": 16,
+             "(O)": 17,
+             "(O+)": 18,
+             "(O+*)": 19,
+             "(Q)": 20,
+             "(R)": 21,
+             "(o)": 22,
+             "(o+)": 23,
+             "(o+*)": 24,
+             "(x)": 25,
+             "(x+)": 26,
+             "(x+*)": 27,
+             "(x+*B)": 28,
+             "(x+*Q)": 29,
+             "(x+*R)": 30,
+             "(x+B)": 31,
+             "(x+N)": 32,
+             "(x+Q)": 33,
+             "(x+R)": 34,
+             "(xB)": 35,
+             "(xE)": 36,
+             "(xE+)": 37,
+             "(xE+*)": 38,
+             "(xN)": 39,
+             "(xQ)": 40,
+             "(xR)": 41,
+             "B": 42,
+             "K": 43,
+             "N": 44,
+             "P": 45,
+             "Q": 46,
+             "R": 47,
+             "W": 48,
+             "a1": 49,
+             "a2": 50,
+             "a3": 51,
+             "a4": 52,
+             "a5": 53,
+             "a6": 54,
+             "a7": 55,
+             "a8": 56,
+             "b1": 57,
+             "b2": 58,
+             "b3": 59,
+             "b4": 60,
+             "b5": 61,
+             "b6": 62,
+             "b7": 63,
+             "b8": 64,
+             "c1": 65,
+             "c2": 66,
+             "c3": 67,
+             "c4": 68,
+             "c5": 69,
+             "c6": 70,
+             "c7": 71,
+             "c8": 72,
+             "d1": 73,
+             "d2": 74,
+             "d3": 75,
+             "d4": 76,
+             "d5": 77,
+             "d6": 78,
+             "d7": 79,
+             "d8": 80,
+             "e1": 81,
+             "e2": 82,
+             "e3": 83,
+             "e4": 84,
+             "e5": 85,
+             "e6": 86,
+             "e7": 87,
+             "e8": 88,
+             "f1": 89,
+             "f2": 90,
+             "f3": 91,
+             "f4": 92,
+             "f5": 93,
+             "f6": 94,
+             "f7": 95,
+             "f8": 96,
+             "g1": 97,
+             "g2": 98,
+             "g3": 99,
+             "g4": 100,
+             "g5": 101,
+             "g6": 102,
+             "g7": 103,
+             "g8": 104,
+             "h1": 105,
+             "h2": 106,
+             "h3": 107,
+             "h4": 108,
+             "h5": 109,
+             "h6": 110,
+             "h7": 111
+         }
+
+         if vocab is not None:
+             self._vocab = vocab
+         elif vocab_file is not None and os.path.exists(vocab_file):
+             with open(vocab_file, "r", encoding="utf-8") as f:
+                 self._vocab = json.load(f)
+         else:
+             print("No vocabulary provided; creating default minimal vocab.")
+             self._vocab = self._create_default_vocab()
+
+         self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
+
+         super().__init__(
+             pad_token=self._pad_token,
+             bos_token=self._bos_token,
+             eos_token=self._eos_token,
+             unk_token=self._unk_token,
+             sep_token=self._sep_token,
+             **kwargs,
+         )
+
+     def _create_default_vocab(self) -> Dict[str, int]:
+         special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SEP_TOKEN]
+         vocab = {token: idx for idx, token in enumerate(special_tokens)}
+         return vocab
+
+     @classmethod
+     def build_vocab_from_dataset(
+         cls,
+         dataset_name: str = "dlouapre/lichess_2025-01_1M",
+         split: str = "train",
+         column: str = "text",
+         min_frequency: Optional[int] = 1,
+         max_samples: Optional[int] = None,
+         save_path: Optional[str] = None,
+     ) -> "ChessTokenizer":
+         """Build a vocabulary by decomposing every move in the dataset into
+         color, piece, source-square, destination-square and suffix tokens.
+         (`min_frequency` and `max_samples` are currently unused.)
+         """
+         if save_path is None:
+             cwd = os.getcwd()
+             save_path = os.path.join(cwd, "chess_tokenizer_vocab.json")
+
+         if os.path.exists(save_path):
+             try:
+                 with open(save_path, "r", encoding="utf-8") as f:
+                     print("Loading existing tokenizer vocab from", save_path)
+                     vocab = json.load(f)
+                 return cls(vocab=vocab)
+             except Exception:
+                 pass
+
+         dataset = load_dataset(dataset_name, split=split)
+
+         samples = dataset[column]
+
+         tokens = set()
+
+         for game in samples:
+             if not isinstance(game, str):
+                 continue
+             moves = game.strip().split()
+             for move in moves:
+                 if len(move) < 2:
+                     continue
+                 color = move[0]
+                 piece = move[1]
+                 from_square = move[2:4] if len(move) >= 4 else ''
+                 to_square = move[4:6] if len(move) >= 6 else ''
+                 suffix = move[6:] if len(move) > 6 else ''
+
+                 tokens.add(color)
+                 tokens.add(piece)
+                 tokens.add(from_square)
+                 tokens.add(to_square)
+                 if suffix:
+                     tokens.add(suffix)
+
+         tokens = sorted(tokens)
+
+         special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN, cls.SEP_TOKEN]
+
+         vocab: Dict[str, int] = {}
+         idx = 0
+         for st in special_tokens:
+             vocab[st] = idx
+             idx += 1
+
+         for t in tokens:
+             if t in vocab:
+                 continue
+             vocab[t] = idx
+             idx += 1
+
+         tokenizer = cls(vocab=vocab)
+
+         try:
+             if save_path is None:
+                 cwd = os.getcwd()
+                 save_path = os.path.join(cwd, "chess_tokenizer_vocab.json")
+
+             tmp_path = save_path + ".tmp"
+             with open(tmp_path, "w", encoding="utf-8") as f:
+                 json.dump(vocab, f, ensure_ascii=False, indent=2)
+             os.replace(tmp_path, save_path)
+         except Exception:
+             # Non-fatal: ignore save errors but don't leave temp files behind.
+             try:
+                 if 'tmp_path' in locals() and os.path.exists(tmp_path):
+                     os.remove(tmp_path)
+             except Exception:
+                 pass
+
+         return tokenizer
+
+     @property
+     def vocab_size(self) -> int:
+         """Return the size of the vocabulary."""
+         return len(self._vocab)
+
+     def get_vocab(self) -> Dict[str, int]:
+         """Return the vocabulary as a dictionary."""
+         return dict(self._vocab)
+
+     def _tokenize(self, text: str) -> List[str]:
+         """
+         Tokenize a string of moves into a list of tokens.
+
+         Args:
+             text: A string of space-separated moves.
+
+         Returns:
+             List of move tokens.
+         """
+         tokens: List[str] = []
+         for move in text.strip().split():
+             if len(move) < 2:
+                 continue
+             color, piece, from_square, to_square, suffix = self._decompose_move(move)
+             tokens.append(color)
+             tokens.append(piece)
+             tokens.append(from_square)
+             tokens.append(to_square)
+             if suffix:
+                 tokens.append(suffix)
+
+             tokens.append(self._sep_token)
+
+         return tokens[:-1]  # Remove the trailing SEP token
+
+     @staticmethod
+     def _decompose_move(move: str):
+         """Decompose a move string into components: color, piece, from_square, to_square, suffix.
+         Returns a 5-tuple of strings (empty strings for missing parts).
+         """
+         color = move[0]
+         piece = move[1] if len(move) >= 2 else ''
+         from_square = move[2:4] if len(move) >= 4 else ''
+         to_square = move[4:6] if len(move) >= 6 else ''
+         suffix = move[6:] if len(move) > 6 else ''
+         return color, piece, from_square, to_square, suffix
+
+     def _convert_token_to_id(self, token: str) -> int:
+         """Convert a token to its ID."""
+         return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
+
+     def _convert_id_to_token(self, index: int) -> str:
+         """Convert an ID to its token."""
+         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         """Convert a list of tokens back to a string."""
+         # Filter out special tokens for cleaner output
+         special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+         return " ".join(t for t in tokens if t not in special)
+
+     def decode(self, token_ids: List[int], skip_special_tokens: bool = True) -> str:
+         """Decode a list of token IDs back to a string."""
+         tokens = [self._convert_id_to_token(int(tid)) for tid in token_ids]
+         if skip_special_tokens:
+             special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+             # The SEP token is replaced by a space between moves
+             tokens = [t if t != self.SEP_TOKEN else " " for t in tokens if t not in special]
+         return "".join(tokens)
+
+     def save_vocabulary(
+         self,
+         save_directory: str,
+         filename_prefix: Optional[str] = None,
+     ) -> tuple:
+         """
+         Save the vocabulary to a JSON file.
+
+         Args:
+             save_directory: Directory to save the vocabulary.
+             filename_prefix: Optional prefix for the filename.
+
+         Returns:
+             Tuple containing the path to the saved vocabulary file.
+         """
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
+         )
+
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             json.dump(self._vocab, f, ensure_ascii=False, indent=2)
+
+         return (vocab_file,)
+
+     def save_pretrained(
+         self,
+         save_directory: str,
+         filename_prefix: Optional[str] = None,
+         save_tokenizer_code: bool = True,
+     ) -> None:
+         """Save tokenizer files to a directory in an HF-compatible layout.
+         This writes the vocab JSON (via `save_vocabulary`), a small
+         `tokenizer_config.json` describing special tokens and the vocab
+         filename, and optionally copies the tokenizer module source file
+         into the directory so others can import the implementation.
+         """
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         # Save the vocabulary file
+         vocab_file_tuple = self.save_vocabulary(save_directory, filename_prefix)
+         vocab_file = vocab_file_tuple[0]
+
+         # Write a minimal tokenizer config
+         config = {
+             "tokenizer_class": self.__class__.__name__,
+             "vocab_file": os.path.basename(vocab_file),
+             "pad_token": self.PAD_TOKEN,
+             "bos_token": self.BOS_TOKEN,
+             "eos_token": self.EOS_TOKEN,
+             "unk_token": self.UNK_TOKEN,
+         }
+         config_path = os.path.join(save_directory, "tokenizer_config.json")
+         with open(config_path, "w", encoding="utf-8") as f:
+             json.dump(config, f, ensure_ascii=False, indent=2)
+
+         # Optionally copy this module file so the tokenizer class implementation
+         # is available alongside the saved vocab/config. This helps when
+         # transferring the saved tokenizer to another environment.
+         if save_tokenizer_code:
+             try:
+                 src_file = Path(inspect.getsourcefile(self.__class__))
+                 dst_file = Path(save_directory) / src_file.name
+                 shutil.copy2(src_file, dst_file)
+             except Exception:
+                 # Non-fatal; we still saved vocab and config
+                 pass
+
+
+ def count_vocab_from_dataset(
+     dataset_name: str = "dlouapre/lichess_2025-01_1M",
+     split: str = "train",
+     column: str = "text",
+     max_samples: Optional[int] = 10000,
+ ) -> Dict[str, int]:
+     """
+     Count token frequencies in a dataset (useful for vocabulary analysis).
+
+     Args:
+         dataset_name: Name of the dataset on Hugging Face Hub.
+         split: Dataset split to use.
+         column: Column containing the game strings.
+         max_samples: Maximum number of samples to process.
+
+     Returns:
+         Dictionary mapping tokens to their frequencies.
+     """
+     from collections import Counter
+     from datasets import load_dataset
+
+     dataset = load_dataset(dataset_name, split=split)
+
+     if max_samples is not None:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+
+     tokenizer = ChessTokenizer()
+     token_counts = Counter()
+
+     for example in dataset:
+         token_counts.update(tokenizer._tokenize(example[column]))
+
+     return dict(token_counts)
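A short sketch of the tokenization scheme described in the module docstring, assuming `tokenizer.py` is importable from a clone of this repo; the move strings are hypothetical examples of the `<color><piece><from><to><suffix>` format, not taken from the training data.

```python
from tokenizer import ChessTokenizer  # assumes a local clone of this repo

tok = ChessTokenizer()

# Hypothetical moves: White pawn e2->e4, Black knight g8->f6, White bishop f1->c4 capturing.
game = "WPe2e4 BNg8f6 WBf1c4(x)"
print(tok._tokenize(game))
# ['W', 'P', 'e2', 'e4', '[SEP]', 'B', 'N', 'g8', 'f6', '[SEP]', 'W', 'B', 'f1', 'c4', '(x)']

ids = tok.encode(game)
print(tok.decode(ids))  # round-trips to "WPe2e4 BNg8f6 WBf1c4(x)"; [SEP] becomes a space
```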
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "tokenizer_class": "ChessTokenizer",
+   "vocab_file": "vocab.json",
+   "pad_token": "[PAD]",
+   "bos_token": "[BOS]",
+   "eos_token": "[EOS]",
+   "unk_token": "[UNK]",
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.ChessTokenizer",
+       null
+     ]
+   }
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65443e2f5285cd13453a8a3bf0ed6ae85c7d1fe02a020d955d87fbbc4dfdcb86
+ size 5841
vocab.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "[PAD]": 0,
+   "[BOS]": 1,
+   "[EOS]": 2,
+   "[UNK]": 3,
+   "[SEP]": 4,
+   "(+)": 5,
+   "(+*)": 6,
+   "(+*B)": 7,
+   "(+*N)": 8,
+   "(+*Q)": 9,
+   "(+*R)": 10,
+   "(+B)": 11,
+   "(+N)": 12,
+   "(+Q)": 13,
+   "(+R)": 14,
+   "(B)": 15,
+   "(N)": 16,
+   "(O)": 17,
+   "(O+)": 18,
+   "(O+*)": 19,
+   "(Q)": 20,
+   "(R)": 21,
+   "(o)": 22,
+   "(o+)": 23,
+   "(o+*)": 24,
+   "(x)": 25,
+   "(x+)": 26,
+   "(x+*)": 27,
+   "(x+*B)": 28,
+   "(x+*Q)": 29,
+   "(x+*R)": 30,
+   "(x+B)": 31,
+   "(x+N)": 32,
+   "(x+Q)": 33,
+   "(x+R)": 34,
+   "(xB)": 35,
+   "(xE)": 36,
+   "(xE+)": 37,
+   "(xE+*)": 38,
+   "(xN)": 39,
+   "(xQ)": 40,
+   "(xR)": 41,
+   "B": 42,
+   "K": 43,
+   "N": 44,
+   "P": 45,
+   "Q": 46,
+   "R": 47,
+   "W": 48,
+   "a1": 49,
+   "a2": 50,
+   "a3": 51,
+   "a4": 52,
+   "a5": 53,
+   "a6": 54,
+   "a7": 55,
+   "a8": 56,
+   "b1": 57,
+   "b2": 58,
+   "b3": 59,
+   "b4": 60,
+   "b5": 61,
+   "b6": 62,
+   "b7": 63,
+   "b8": 64,
+   "c1": 65,
+   "c2": 66,
+   "c3": 67,
+   "c4": 68,
+   "c5": 69,
+   "c6": 70,
+   "c7": 71,
+   "c8": 72,
+   "d1": 73,
+   "d2": 74,
+   "d3": 75,
+   "d4": 76,
+   "d5": 77,
+   "d6": 78,
+   "d7": 79,
+   "d8": 80,
+   "e1": 81,
+   "e2": 82,
+   "e3": 83,
+   "e4": 84,
+   "e5": 85,
+   "e6": 86,
+   "e7": 87,
+   "e8": 88,
+   "f1": 89,
+   "f2": 90,
+   "f3": 91,
+   "f4": 92,
+   "f5": 93,
+   "f6": 94,
+   "f7": 95,
+   "f8": 96,
+   "g1": 97,
+   "g2": 98,
+   "g3": 99,
+   "g4": 100,
+   "g5": 101,
+   "g6": 102,
+   "g7": 103,
+   "g8": 104,
+   "h1": 105,
+   "h2": 106,
+   "h3": 107,
+   "h4": 108,
+   "h5": 109,
+   "h6": 110,
+   "h7": 111
+ }