marius-785 committed
Commit 2f5a56b · verified · 1 parent: 9571e11

Chess Challenge submission by marius-785

Files changed (5):
  1. README.md (+4, -8)
  2. config.json (+4, -0)
  3. model.py (+531, -0)
  4. tokenizer_config.json (+2, -2)
  5. tokenizer_decomposed.py (+156, -0)
README.md CHANGED
@@ -7,20 +7,16 @@ tags:
 license: mit
 ---
 
-# chess_M15_BOSS
+## Chess model submitted to the LLM Course Chess Challenge.
 
-Chess model submitted to the LLM Course Chess Challenge. marius-785's model
-
-## Submission Info
-
+### Submission Info
 - **Submitted by**: [marius-785](https://huggingface.co/marius-785)
 - **Parameters**: 996,608
 - **Organization**: LLM-course
 
-## Model Details
-
+### Model Details
 - **Architecture**: Chess Transformer (GPT-style)
-- **Vocab size**: 83
+- **Vocab size**: 82
 - **Embedding dim**: 136
 - **Layers**: 6
 - **Heads**: 4
config.json CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "ChessForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "model.ChessConfig",
+    "AutoModelForCausalLM": "model.ChessForCausalLM"
+  },
   "bos_token_id": 1,
   "dropout": 0.1,
   "dtype": "float32",
model.py ADDED
@@ -0,0 +1,531 @@
"""
Chess Transformer Model for the Chess Challenge.

This module provides a simple GPT-style transformer architecture
designed to fit within the 1M parameter constraint.

Key components:
- ChessConfig: Configuration class for model hyperparameters
- ChessForCausalLM: The main model class for next-move prediction
"""

from __future__ import annotations

import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a ~1M parameter model.
    Students can adjust these values to explore different architectures.

    Parameter budget breakdown (with default values):
    - Token embeddings: 1200 x 128 = 153,600
    - Position embeddings: 256 x 128 = 32,768
    - Transformer layers: 6 x ~120,000 = ~720,000
    - LM head (with weight tying): 0 (shared with embeddings)
    - Total: ~906,000 parameters

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        group_size: If set, number of query heads per KV head
            (enables grouped-query attention).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
        rms_Norm: Whether to use RMSNorm instead of LayerNorm.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        group_size: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        rms_Norm: bool = False,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.group_size = group_size
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        self.rms_Norm = rms_Norm
        # Inform the HF base class about tying behavior
        self.tie_word_embeddings = bool(tie_weights)

class GroupedQueryAttention(nn.Module):
    """
    Grouped-query attention: the full set of query heads is kept, while
    K and V are projected to n_head // group_size heads and repeated to
    match, cutting KV projection parameters.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_head % config.group_size == 0, "n_head must be divisible by group_size"
        print(f"Using Grouped Query Attention with group_size={config.group_size}")
        self.n_head = config.n_head  # Total query heads
        self.group_size = config.group_size
        self.n_kv_head = self.n_head // config.group_size  # Number of KV heads

        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Q projection stays full-size, but the K and V projections are smaller.
        # Total output: n_embd (for Q) + 2 * n_kv_head * head_dim (for K and V).
        self.q_proj = nn.Linear(config.n_embd, config.n_embd)
        self.kv_proj = nn.Linear(config.n_embd, 2 * self.n_kv_head * self.head_dim)

        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(1, 1, config.n_ctx, config.n_ctx),
            persistent=False,
        )

    def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # 1. Project Q, K, V
        q = self.q_proj(x)    # (B, T, n_head * head_dim)
        kv = self.kv_proj(x)  # (B, T, 2 * n_kv_head * head_dim)
        k, v = kv.split(self.n_kv_head * self.head_dim, dim=2)

        # 2. Reshape Q normally
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        # 3. Reshape K, V and repeat them to match Q
        k = k.view(batch_size, seq_len, self.n_kv_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_kv_head, self.head_dim).transpose(1, 2)

        # Repeat KV heads group_size times to match n_head; repeat_interleave
        # ensures KV head 0 serves the first group_size Q heads.
        k = k.repeat_interleave(self.group_size, dim=1)  # (B, n_head, T, head_dim)
        v = v.repeat_interleave(self.group_size, dim=1)  # (B, n_head, T, head_dim)

        # 4. Standard scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        if attention_mask is not None:
            attn_weights = attn_weights.masked_fill(attention_mask.unsqueeze(1).unsqueeze(2) == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_output = torch.matmul(self.dropout(attn_weights), v)

        # 5. Recombine heads
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, seq_len, self.n_embd)
        return self.c_proj(attn_output)

class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention module.

    This is a standard scaled dot-product attention implementation
    with causal masking for autoregressive generation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        print("Using standard Multi-Head Attention")
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Combined QKV projection for efficiency
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Causal mask buffer (lower-triangular, not saved with the weights)
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Compute Q, K, V
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        # Reshape for multi-head attention
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Apply causal mask
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Apply attention mask (for padding)
        if attention_mask is not None:
            # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values
        attn_output = torch.matmul(attn_weights, v)

        # Reshape back
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        # Output projection
        attn_output = self.c_proj(attn_output)

        return attn_output

class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x

class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (LayerNorm/RMSNorm before attention/FFN) for
    better training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        # Select the normalization layer once instead of branching twice
        norm_cls = nn.RMSNorm if config.rms_Norm else nn.LayerNorm
        print(f"Using {norm_cls.__name__}")
        self.ln_1 = norm_cls(config.n_embd, eps=config.layer_norm_epsilon)
        self.ln_2 = norm_cls(config.n_embd, eps=config.layer_norm_epsilon)

        if config.group_size is not None:
            self.attn = GroupedQueryAttention(config)
        else:
            self.attn = MultiHeadAttention(config)

        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pre-norm attention
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        # Pre-norm FFN
        x = x + self.mlp(self.ln_2(x))
        return x

class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a GPT-style architecture with:
    - Token embeddings for chess moves
    - Learned positional embeddings
    - Stacked transformer blocks
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    # Suppress the missing-key warning for the tied lm_head when loading.
    # Note the leading underscore: PreTrainedModel reads
    # _keys_to_ignore_on_load_missing, not keys_to_ignore_on_load_missing.
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token and position embeddings
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_ctx, config.n_embd)

        self.drop = nn.Dropout(config.dropout)

        # Transformer blocks
        self.h = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.n_layer)
        ])

        # Final layer norm
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Output head
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Declare tied weights for proper serialization
        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        # Initialize weights
        self.post_init()

        # Tie weights if configured
        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Use the HF helper to tie or clone depending on config
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.ones_(module.weight)
            torch.nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing loss (if labels provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Create position IDs if not provided
        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        # Get embeddings
        token_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = self.drop(token_embeds + position_embeds)

        # Pass through transformer blocks
        for block in self.h:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        # Final layer norm
        hidden_states = self.ln_f(hidden_states)

        # Get logits
        logits = self.lm_head(hidden_states)

        # Compute loss if labels are provided
        loss = None
        if labels is not None:
            # Shift logits and labels for next-token prediction
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Flatten for cross-entropy
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Applies structural constraints enforcing:
        ColoredPiece [SOURCE] source [DEST] dest [modifiers]*

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from the top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        # Get logits for the last position
        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :].clone() / temperature

        # Apply structural constraints (hardcoded to the ChessTokenizer structure).
        # Note: src.tokenizer comes from the challenge harness; it is not part
        # of this commit.
        from src.tokenizer import ChessLogitsProcessor
        processor = ChessLogitsProcessor()
        logits = processor.constrain_logits(input_ids, logits)

        # Apply top-k filtering
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, min(top_k, logits.size(-1)))[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Apply top-p (nucleus) filtering
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        # Sample from the distribution
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)
        return next_token.item()


# Register the model with the Auto classes for easy loading
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
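
For orientation, a minimal usage sketch of the module as committed: build a config with the hyperparameters listed in the README (vocab size 82, embedding dim 136, 6 layers, 4 heads; assuming these match the shipped config.json) and run a forward pass:

# Usage sketch; hyperparameters taken from the README, assumed to match
# the submitted checkpoint.
import torch
from model import ChessConfig, ChessForCausalLM

config = ChessConfig(vocab_size=82, n_embd=136, n_layer=6, n_head=4)
model = ChessForCausalLM(config)
print(sum(p.numel() for p in model.parameters()))  # total parameter count

input_ids = torch.tensor([[1, 10, 42]])  # [BOS] plus two arbitrary token ids
outputs = model(input_ids, labels=input_ids)
print(outputs.loss, outputs.logits.shape)  # logits: (1, 3, 82)

Note that generate_move additionally imports ChessLogitsProcessor from src.tokenizer, which belongs to the challenge harness rather than this repo, so only the plain forward pass is exercised here.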
tokenizer_config.json CHANGED
@@ -35,7 +35,7 @@
   },
   "auto_map": {
     "AutoTokenizer": [
-      "tokenizer.ChessTokenizer",
+      "tokenizer_decomposed.ChessDecomposedTokenizer",
       null
     ]
   },
@@ -45,6 +45,6 @@
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "tokenizer_class": "ChessTokenizer",
+  "tokenizer_class": "ChessDecomposedTokenizer",
   "unk_token": "[UNK]"
 }
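
With this change, AutoTokenizer resolves to the new decomposed tokenizer through the repo's remote code (the null entry means no fast tokenizer is provided). A sketch, reusing the hypothetical repo id from above:

# The repo id is an assumption (see the config.json example).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("LLM-course/chess_M15_BOSS", trust_remote_code=True)
ids = tok("WPe2e4")["input_ids"]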
tokenizer_decomposed.py ADDED
@@ -0,0 +1,156 @@
"""
Decomposed Chess Tokenizer.

This tokenizer decomposes each move into 3-4 tokens:
- color+piece token (e.g., "WP", "BN")
- from-square token with suffix "_f" (e.g., "e2_f")
- to-square token with suffix "_t" (e.g., "e4_t")
- optional promotion token (one of "q", "r", "b", "n")

This avoids UNKs for rare moves and makes legality learning easier,
because the model always emits explicit squares.
"""

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer

class ChessDecomposedTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Moves look like "WPe2e4": color, piece, from-square, to-square, modifiers
    _MOVE_RE = re.compile(r"^[WB][PNBRQK][a-h][1-8][a-h][1-8].*$")

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_full_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    @staticmethod
    def _create_full_vocab() -> Dict[str, int]:
        special_tokens = [
            ChessDecomposedTokenizer.PAD_TOKEN,
            ChessDecomposedTokenizer.BOS_TOKEN,
            ChessDecomposedTokenizer.EOS_TOKEN,
            ChessDecomposedTokenizer.UNK_TOKEN,
        ]

        pieces = ["P", "N", "B", "R", "Q", "K"]
        colors = ["W", "B"]
        piece_tokens = [f"{c}{p}" for c in colors for p in pieces]

        files = "abcdefgh"
        ranks = "12345678"
        squares = [f"{f}{r}" for f in files for r in ranks]
        from_tokens = [f"{sq}_f" for sq in squares]
        to_tokens = [f"{sq}_t" for sq in squares]

        promo_tokens = ["q", "r", "b", "n"]

        tokens = special_tokens + piece_tokens + from_tokens + to_tokens + promo_tokens
        return {tok: idx for idx, tok in enumerate(tokens)}

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        raw = text.strip()
        if not raw:
            return []

        parts = raw.split()
        out: List[str] = []

        for part in parts:
            if part in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
                out.append(part)
                continue

            if not self._MOVE_RE.match(part):
                out.append(self.UNK_TOKEN)
                continue

            color = part[0]
            piece = part[1]
            from_sq = part[2:4]
            to_sq = part[4:6]
            out.append(f"{color}{piece}")
            out.append(f"{from_sq}_f")
            out.append(f"{to_sq}_t")

            if "=" in part:
                promo_idx = part.find("=")
                if promo_idx != -1 and promo_idx + 1 < len(part):
                    promo = part[promo_idx + 1].lower()
                    if promo in {"q", "r", "b", "n"}:
                        out.append(promo)

        return out

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
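
A short sanity-check sketch of the decomposition, instantiating the class directly so it falls back to the built-in full vocabulary (a shipped vocab.json may differ, e.g. the README reports a vocab size of 82):

# Decomposing moves with the tokenizer above.
from tokenizer_decomposed import ChessDecomposedTokenizer

tok = ChessDecomposedTokenizer()
print(tok.vocab_size)                 # 4 special + 12 pieces + 128 squares + 4 promotions = 148
print(tok.tokenize("WPe2e4 BNg8f6"))  # ['WP', 'e2_f', 'e4_t', 'BN', 'g8_f', 'f6_t']
print(tok.tokenize("WPe7e8=Q"))       # ['WP', 'e7_f', 'e8_t', 'q']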