gbl1357 committed
Commit bc80f73 · verified · 1 parent: 80c1f0e

Update model.py

Files changed (1)
  1. model.py +346 -0
model.py CHANGED
@@ -0,0 +1,346 @@
+ """
+ Chess Transformer Model for the Chess Challenge.
+
+ This module provides a simple GPT-style transformer architecture
+ designed to fit within the 1M parameter constraint.
+
+ Key components:
+ - ChessConfig: Configuration class for model hyperparameters
+ - ChessForCausalLM: The main model class for next-move prediction
+ """
+
+ from __future__ import annotations
+
+ import math
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PretrainedConfig, PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class ChessConfig(PretrainedConfig):
+     """
+     Configuration class for the Chess Transformer model.
+
+     This configuration is designed for a ~1M parameter model.
+     Students can adjust these values to explore different architectures.
+
+     Parameter budget breakdown (example configuration with vocab_size=1200,
+     n_ctx=256, n_embd=128, n_layer=6; the constructor defaults below differ):
+     - Embeddings (vocab): 1200 x 128 = 153,600
+     - Position Embeddings: 256 x 128 = 32,768
+     - Transformer Layers: 6 x ~120,000 = ~720,000
+     - LM Head (with weight tying): 0 (shared with embeddings)
+     - Total: ~906,000 parameters
+
+     Attributes:
+         vocab_size: Size of the vocabulary (number of unique moves).
+         n_embd: Embedding dimension (d_model).
+         n_layer: Number of transformer layers.
+         n_head: Number of attention heads.
+         n_ctx: Maximum sequence length (context window).
+         n_inner: Feed-forward inner dimension (falls back to 3 * n_embd if None).
+         dropout: Dropout probability.
+         layer_norm_epsilon: Epsilon for layer normalization.
+         tie_weights: Whether to tie embedding and output weights.
+     """
+
+     model_type = "chess_transformer"
+
+     def __init__(
+         self,
+         vocab_size: int = 84,
+         n_embd: int = 128,
+         n_layer: int = 7,
+         n_head: int = 4,
+         n_ctx: int = 512,
+         n_inner: Optional[int] = 256,
+         dropout: float = 0.1,
+         layer_norm_epsilon: float = 1e-5,
+         tie_weights: bool = True,
+         pad_token_id: int = 0,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
+
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_ctx = n_ctx
+         self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
+         self.dropout = dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.tie_weights = tie_weights
+         # Inform HF base class about tying behavior
+         self.tie_word_embeddings = bool(tie_weights)
+
+
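+ # Rough per-layer parameter accounting (an illustrative sketch, not an exact
+ # count): each TransformerBlock below contributes roughly
+ #   attention: 4 * n_embd**2          (combined QKV projection + output projection)
+ #   MLP:       2 * n_embd * n_inner   (up- and down-projection)
+ # plus small bias and LayerNorm terms. The optional sanity check at the end
+ # of this file prints the exact total for the current defaults.
+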
+ class MultiHeadAttention(nn.Module):
+     """
+     Multi-head self-attention module.
+
+     This is a standard scaled dot-product attention implementation
+     with causal masking for autoregressive generation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         assert config.n_embd % config.n_head == 0, \
+             f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
+
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.head_dim = config.n_embd // config.n_head
+
+         # Combined QKV projection for efficiency
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         batch_size, seq_len, _ = x.size()
+
+         # Project to queries, keys, and values in one pass, then split.
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+
+         # Reshape to (batch_size, n_head, seq_len, head_dim).
+         q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+
+         # Scaled dot-product scores: (batch_size, n_head, seq_len, seq_len).
+         attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+
+         # Causal mask: position i may only attend to positions <= i.
+         causal = torch.ones(seq_len, seq_len, device=x.device, dtype=torch.bool).tril()
+         attn_weights = attn_weights.masked_fill(~causal.view(1, 1, seq_len, seq_len), float("-inf"))
+
+         # Optional padding mask: hide padded key positions.
+         if attention_mask is not None:
+             attention_mask = attention_mask.to(torch.bool)
+             attn_weights = attn_weights.masked_fill(~attention_mask.view(batch_size, 1, 1, seq_len), float("-inf"))
+
+         attn_weights = F.softmax(attn_weights, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         attn_output = torch.matmul(attn_weights, v)
+
+         # Merge heads back to (batch_size, seq_len, n_embd).
+         attn_output = attn_output.transpose(1, 2).contiguous().view(
+             batch_size, seq_len, self.n_embd
+         )
+
+         attn_output = self.c_proj(attn_output)
+
+         return attn_output
+
+
+ class FeedForward(nn.Module):
+     """
+     Feed-forward network (MLP) module.
+
+     Standard two-layer MLP with GELU activation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.c_fc = nn.Linear(config.n_embd, config.n_inner)
+         self.c_proj = nn.Linear(config.n_inner, config.n_embd)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.c_fc(x)
+         x = F.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     """
+     A single transformer block with attention and feed-forward layers.
+
+     Uses pre-normalization (LayerNorm before attention/FFN) for better
+     training stability.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.attn = MultiHeadAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.mlp = FeedForward(config)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         # Pre-norm residual connections: x + Attn(LN(x)), then x + MLP(LN(x)).
+         x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class ChessForCausalLM(PreTrainedModel):
+     """
+     Chess Transformer for Causal Language Modeling (next-move prediction).
+
+     This model is designed to predict the next chess move given a sequence
+     of previous moves. It uses a GPT-style architecture with:
+     - Token embeddings for chess moves
+     - Learned positional embeddings
+     - Stacked transformer blocks
+     - Linear head for next-token prediction
+
+     The model supports weight tying between the embedding layer and the
+     output projection to save parameters.
+
+     Example:
+         >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
+         >>> model = ChessForCausalLM(config)
+         >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
+         >>> outputs = model(**inputs)
+         >>> next_move_logits = outputs.logits[:, -1, :]
+     """
+
+     config_class = ChessConfig
+     base_model_prefix = "transformer"
+     supports_gradient_checkpointing = True
+     # Suppress missing-key warning for tied lm_head when loading
+     _keys_to_ignore_on_load_missing = ["lm_head.weight"]
+
+     def __init__(self, config: ChessConfig):
+         super().__init__(config)
+
+         self.wte = nn.Embedding(config.vocab_size, config.n_embd)
+         self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
+
+         self.drop = nn.Dropout(config.dropout)
+
+         self.h = nn.ModuleList([
+             TransformerBlock(config) for _ in range(config.n_layer)
+         ])
+
+         self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         if config.tie_weights:
+             self._tied_weights_keys = ["lm_head.weight"]
+
+         self.post_init()
+
+         if config.tie_weights:
+             self.tie_weights()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings: nn.Module):
+         self.wte = new_embeddings
+         if getattr(self.config, "tie_weights", False):
+             self.tie_weights()
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings: nn.Module):
+         self.lm_head = new_embeddings
+
+     def tie_weights(self):
+         if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
+             self._tie_or_clone_weights(self.lm_head, self.wte)
+
+     def _init_weights(self, module: nn.Module):
+         """Initialize weights following GPT-2 style."""
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+         elif isinstance(module, nn.LayerNorm):
+             torch.nn.init.ones_(module.weight)
+             torch.nn.init.zeros_(module.bias)
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         """
+         Forward pass of the model.
+
+         Args:
+             input_ids: Token IDs of shape (batch_size, seq_len).
+             attention_mask: Attention mask of shape (batch_size, seq_len).
+             position_ids: Position IDs of shape (batch_size, seq_len).
+             labels: Labels for language modeling loss.
+             return_dict: Whether to return a ModelOutput object.
+
+         Returns:
+             CausalLMOutputWithPast containing loss (if labels provided) and logits.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         batch_size, seq_len = input_ids.size()
+         device = input_ids.device
+
+         if position_ids is None:
+             position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)
+
+         # Sum token and position embeddings, then apply dropout.
+         token_embeds = self.wte(input_ids)
+         position_embeds = self.wpe(position_ids)
+         hidden_states = self.drop(token_embeds + position_embeds)
+
+         for block in self.h:
+             hidden_states = block(hidden_states, attention_mask=attention_mask)
+
+         hidden_states = self.ln_f(hidden_states)
+
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens at position i predict the label at position i + 1.
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+
+             loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+             loss = loss_fct(
+                 shift_logits.view(-1, shift_logits.size(-1)),
+                 shift_labels.view(-1),
+             )
+
+         if not return_dict:
+             output = (logits,)
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+             hidden_states=None,
+             attentions=None,
+         )
+
+
+ # Register the model with Auto classes for easy loading
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("chess_transformer", ChessConfig)
+ AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
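+
+
+ if __name__ == "__main__":
+     # Optional sanity check: a minimal, illustrative sketch that builds the
+     # model with the default hyperparameters, prints the parameter count,
+     # and runs a dummy forward pass. Shapes and values here are placeholders.
+     config = ChessConfig()
+     # After the registration above, AutoModelForCausalLM.from_config(config)
+     # should resolve to this class; the direct constructor is used for clarity.
+     model = ChessForCausalLM(config)
+
+     total_params = sum(p.numel() for p in model.parameters())
+     print(f"Total parameters: {total_params:,}")  # should land near the ~1M budget
+
+     dummy_ids = torch.randint(0, config.vocab_size, (2, 16))
+     outputs = model(input_ids=dummy_ids, labels=dummy_ids)
+     print(outputs.logits.shape)  # expected: (2, 16, config.vocab_size)
+     print(outputs.loss)          # cross-entropy over shifted next-move targets
+
+     # Greedy pick of the next move from the last position's logits.
+     next_move_id = outputs.logits[:, -1, :].argmax(dim=-1)
+     print(next_move_id)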