chloecourt committed
Commit 2a14282 · verified · 1 Parent(s): bae78d7

Chess Challenge submission by chloecourt

Files changed (2):
  1. config.json +4 -0
  2. model.py +442 -0
config.json CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "ChessForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "model.ChessConfig",
+    "AutoModelForCausalLM": "model.ChessForCausalLM"
+  },
   "bos_token_id": 1,
   "dropout": 0.1,
   "dtype": "float32",
model.py ADDED
@@ -0,0 +1,442 @@
"""
Chess Transformer Model for the Chess Challenge.

This module provides a simple GPT-style transformer architecture
designed to fit within the 1M parameter constraint.

Key components:
- ChessConfig: Configuration class for model hyperparameters
- ChessForCausalLM: The main model class for next-move prediction
"""

from __future__ import annotations

import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a ~1M parameter model.
    Students can adjust these values to explore different architectures.

    Parameter budget breakdown (with default values):
    - Embeddings (vocab): 1200 x 128 = 153,600
    - Position embeddings: 256 x 128 = 32,768
    - Transformer layers: 6 x ~165,000 = ~992,000
    - LM head (with weight tying): 0 (shared with embeddings)
    - Total: ~1,179,000 parameters (slightly over the 1M budget at these
      defaults; reduce n_layer, n_embd, or n_inner to fit)

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # reduced from 4x to 3x
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        # Inform the HF base class about tying behavior
        self.tie_word_embeddings = bool(tie_weights)
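
# A rough check of the per-layer figure in the docstring, with the defaults
# (n_embd=128, n_inner=384): c_attn 128*384+384 = 49,536; attention c_proj
# 128*128+128 = 16,512; c_fc 128*384+384 = 49,536; MLP c_proj 384*128+128 =
# 49,280; two LayerNorms 4*128 = 512. That is 165,376 per layer, ~992K for six.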


class NewGeLU(nn.Module):
    """Tanh approximation of GELU (GPT-2's "gelu_new"); 0.7978845608... is sqrt(2/pi)."""

    def forward(self, x):
        return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x)))


class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention module.

    This is a standard scaled dot-product attention implementation
    with causal masking for autoregressive generation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Combined QKV projection for efficiency
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Causal mask (non-persistent buffer, sliced to the sequence length at forward time)
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Compute Q, K, V
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        # Reshape for multi-head attention: (batch, n_head, seq_len, head_dim)
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Apply causal mask
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Apply attention mask (for padding)
        if attention_mask is not None:
            # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values
        attn_output = torch.matmul(attn_weights, v)

        # Reshape back to (batch, seq_len, n_embd)
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        # Output projection
        attn_output = self.c_proj(attn_output)

        return attn_output
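
# Note: on PyTorch 2.x the manual attention above could be swapped for
# F.scaled_dot_product_attention(q, k, v, is_causal=True) (plus an attn_mask
# for padding), which fuses masking, softmax, and the matmuls; the explicit
# version is easier to read and instrument.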


class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)
        self.act = NewGeLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = self.act(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (LayerNorm before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pre-norm attention
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        # Pre-norm FFN
        x = x + self.mlp(self.ln_2(x))
        return x
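
# Design note: the pre-norm residual form x + f(LayerNorm(x)) keeps an identity
# path through every block, which generally trains more stably at small scale
# than the post-norm arrangement LayerNorm(x + f(x)).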


class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a GPT-style architecture with:
    - Token embeddings for chess moves
    - Learned positional embeddings
    - Stacked transformer blocks
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    # Suppress the missing-key warning for the tied lm_head when loading
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token and position embeddings
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_ctx, config.n_embd)

        self.drop = nn.Dropout(config.dropout)

        # Transformer blocks
        self.h = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.n_layer)
        ])

        # Final layer norm
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Output head
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Declare tied weights for proper serialization
        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        # Initialize weights
        self.post_init()

        # Tie weights if configured
        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Use the HF helper, which ties or clones depending on config
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.ones_(module.weight)
            torch.nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for the language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing the loss (if labels are provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Create position IDs if not provided
        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        # Get embeddings
        token_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = self.drop(token_embeds + position_embeds)

        # Pass through transformer blocks
        for block in self.h:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        # Final layer norm
        hidden_states = self.ln_f(hidden_states)

        # Get logits
        logits = self.lm_head(hidden_states)

        # Compute loss if labels are provided
        loss = None
        if labels is not None:
            # Shift logits and labels for next-token prediction
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Flatten for cross-entropy
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            # Alternative: nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )
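
    # Note on labels: forward() shifts logits and labels internally, so callers
    # can pass labels=input_ids directly; positions set to -100 (e.g. padding)
    # are excluded from the loss via the ignore_index above.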

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from the top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        # Get logits for the last position
        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        # Apply top-k filtering
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Apply top-p (nucleus) filtering
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        # Sample from the distribution
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()
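
# Caveat: generate_move() samples over the whole move vocabulary and knows
# nothing about chess legality; filtering the sampled id against the legal
# moves in the current position is left to the caller.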


# Register the model with the Auto classes for easy loading
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
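
A quick smoke test of the new module, as a sketch (run from the repository root so model.py is importable; the sampled move id will vary):

import torch
from model import ChessConfig, ChessForCausalLM

config = ChessConfig()  # defaults: vocab_size=1200, n_embd=128, n_layer=6
model = ChessForCausalLM(config)

# Budget check against the ChessConfig docstring (~1.18M at the defaults;
# the tied lm_head shares storage with wte, so it is counted once).
print(sum(p.numel() for p in model.parameters()))

# Sample a next-move token for an arbitrary sequence of move ids.
moves = torch.tensor([[1, 42, 87]])  # bos followed by two move ids
print(model.generate_move(moves, temperature=0.8, top_k=10))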