FatimaAlHarake committed on
Commit e2b53e1 · verified · 1 parent: 9bfe224

Upload model.py

Files changed (1)
  1. model.py +503 -0
model.py ADDED
@@ -0,0 +1,503 @@
"""
Chess Transformer Model for the Chess Challenge.

This module provides a simple GPT-style transformer architecture
designed to fit within the 1M parameter constraint.

Key components:
- ChessConfig: Configuration class for model hyperparameters
- ChessForCausalLM: The main model class for next-move prediction
"""

from __future__ import annotations

import math
# from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

# Register the model with Auto classes for easy loading
from transformers import AutoConfig, AutoModelForCausalLM


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a ~1M parameter model.
    Students can adjust these values to explore different architectures.

    Parameter budget breakdown (with default values):
    - Embeddings (vocab): 1200 x 128 = 153,600
    - Position Embeddings: 256 x 128 = 32,768
    - Transformer Layers: 6 x ~120,000 = ~720,000
    - LM Head (with weight tying): 0 (shared with embeddings)
    - Total: ~906,000 parameters

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        num_kv_groups: Number of key/value groups for grouped-query attention
            (must evenly divide n_head).
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        num_kv_groups: int = 2,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.num_kv_groups = num_kv_groups
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        # Inform HF base class about tying behavior
        self.tie_word_embeddings = bool(tie_weights)

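
# A minimal sketch for checking the parameter budget quoted in the ChessConfig
# docstring. The helper name `estimate_parameter_count` is illustrative only and is
# not referenced elsewhere in this module; it builds a model from a config and counts
# trainable parameters, with tied weights counted once.
def estimate_parameter_count(config: Optional[ChessConfig] = None) -> int:
    config = config if config is not None else ChessConfig()
    model = ChessForCausalLM(config)  # defined later in this module; resolved at call time
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
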
class GroupedQueryAttention(nn.Module):
    def __init__(
        self, config: ChessConfig
    ):
        super().__init__()
        # assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
        assert config.n_head % config.num_kv_groups == 0, \
            "n_head must be divisible by num_kv_groups"

        self.n_embd = config.n_embd
        self.n_head = config.n_head
        self.head_dim = config.n_embd // config.n_head

        self.W_key = nn.Linear(config.n_embd, config.num_kv_groups * self.head_dim)
        self.W_value = nn.Linear(config.n_embd, config.num_kv_groups * self.head_dim)
        self.num_kv_groups = config.num_kv_groups
        self.group_size = config.n_head // config.num_kv_groups

        self.W_query = nn.Linear(config.n_embd, config.n_embd)
        self.out_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.dropout = nn.Dropout(config.dropout)

        # self.register_buffer("cache_k", None, persistent=False)
        # self.register_buffer("cache_v", None, persistent=False)
        # self.ptr_current_pos = 0

        # Causal mask buffer covering the full context window
        # (registered here, sliced to the current sequence length in forward)
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Apply projections
        queries = self.W_query(x)  # (b, seq_len, num_heads * head_dim)
        keys = self.W_key(x)       # (b, seq_len, num_kv_groups * head_dim)
        values = self.W_value(x)   # (b, seq_len, num_kv_groups * head_dim)

        # Reshape
        queries = queries.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        keys_new = keys.view(batch_size, seq_len, self.num_kv_groups, self.head_dim).transpose(1, 2)
        values_new = (
            values.view(batch_size, seq_len, self.num_kv_groups, self.head_dim)
            .transpose(1, 2)
        )

        # KV-cache variant kept for reference (disabled):
        # if use_cache:
        #     if self.cache_k is None:
        #         self.cache_k, self.cache_v = keys_new, values_new
        #     else:
        #         self.cache_k = torch.cat([self.cache_k, keys_new], dim=2)
        #         self.cache_v = torch.cat([self.cache_v, values_new], dim=2)
        #     keys_base, values_base = self.cache_k, self.cache_v
        # else:
        #     keys_base, values_base = keys_new, values_new
        #     if self.cache_k is not None or self.cache_v is not None:
        #         self.cache_k, self.cache_v = None, None
        #         self.ptr_current_pos = 0

        # Expand keys and values to match the number of heads
        # Shape: (b, num_heads, seq_len, head_dim)
        keys = keys_new.repeat_interleave(self.group_size, dim=1)
        # Shape: (b, num_heads, seq_len, head_dim)
        values = values_new.repeat_interleave(self.group_size, dim=1)
        # For example, before repeat_interleave along dim=1 (the KV groups):
        #   [K1, K2]
        # After repeat_interleave (each KV group is repeated group_size times):
        #   [K1, K1, K2, K2]
        # If we used regular repeat instead of repeat_interleave, we'd get:
        #   [K1, K2, K1, K2]

        # Compute scaled dot-product attention (aka self-attention) with a causal mask
        # Shape: (b, num_heads, seq_len, seq_len)
        attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head

        ####################################################
        # Causal mask for the KV-cache variant, kept for reference (disabled):
        # num_tokens_Q = queries.shape[-2]
        # num_tokens_K = keys.shape[-2]
        # device = queries.device
        # if use_cache:
        #     q_positions = torch.arange(
        #         self.ptr_current_pos,
        #         self.ptr_current_pos + num_tokens_Q,
        #         device=device,
        #         dtype=torch.long,
        #     )
        #     self.ptr_current_pos += num_tokens_Q
        # else:
        #     q_positions = torch.arange(num_tokens_Q, device=device, dtype=torch.long)
        #     self.ptr_current_pos = 0
        # k_positions = torch.arange(num_tokens_K, device=device, dtype=torch.long)
        # mask = q_positions.unsqueeze(-1) < k_positions.unsqueeze(0)

        # Use the mask to fill attention scores
        # attn_scores = attn_scores.masked_fill(mask, -torch.inf)
        attn_weights = attn_scores / math.sqrt(self.head_dim)

        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Apply attention mask (for padding)
        if attention_mask is not None:
            # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        assert keys.shape[-1] == self.head_dim
        attn_weights = self.dropout(attn_weights)

        # Shape: (b, seq_len, num_heads, head_dim)
        context_vec = (attn_weights @ values).transpose(1, 2)

        # Combine heads, where self.d_out = self.num_heads * self.head_dim
        context_vec = context_vec.contiguous().view(batch_size, seq_len, self.n_embd)
        context_vec = self.out_proj(context_vec)  # optional projection

        return context_vec

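
# A minimal sketch illustrating the repeat_interleave comment in the forward pass
# above: with num_kv_groups=2 and group_size=2 the KV heads expand as
# [K1, K2] -> [K1, K1, K2, K2], whereas a plain repeat would give [K1, K2, K1, K2].
# The helper name `_demo_kv_expansion` is illustrative and unused by the model.
def _demo_kv_expansion() -> None:
    kv = torch.tensor([[[[1.0]], [[2.0]]]])       # (batch=1, num_kv_groups=2, seq_len=1, head_dim=1)
    interleaved = kv.repeat_interleave(2, dim=1)  # head order: K1, K1, K2, K2
    repeated = kv.repeat(1, 2, 1, 1)              # head order: K1, K2, K1, K2
    print(interleaved.flatten().tolist())  # [1.0, 1.0, 2.0, 2.0]
    print(repeated.flatten().tolist())     # [1.0, 2.0, 1.0, 2.0]
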
class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (RMSNorm before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GroupedQueryAttention(config)
        self.ln_2 = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pre-norm attention
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        # Pre-norm FFN
        x = x + self.mlp(self.ln_2(x))
        return x

class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a GPT-style architecture with:
    - Token embeddings for chess moves
    - Learned positional embeddings
    - A single weight-shared transformer block applied n_layer times
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    # Suppress missing-key warning for tied lm_head when loading
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token and position embeddings
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_ctx, config.n_embd)

        self.drop = nn.Dropout(config.dropout)
        self.n_layer = config.n_layer

        # Transformer blocks: independent per-layer blocks kept for reference (disabled)
        # self.h = nn.ModuleList([
        #     TransformerBlock(config) for _ in range(config.n_layer)
        # ])

        # A single block whose weights are shared across all n_layer passes
        self.shared_block = TransformerBlock(config)

        # Final layer norm
        self.ln_f = nn.RMSNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Output head
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Declare tied weights for proper serialization
        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        # Initialize weights
        self.post_init()

        # Tie weights if configured
        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Use HF helper to tie or clone depending on config
        if (
            getattr(self.config, "tie_weights", False)
            or getattr(self.config, "tie_word_embeddings", False)
        ):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.RMSNorm):
            torch.nn.init.ones_(module.weight)
            # torch.nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing loss (if labels provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Create position IDs if not provided
        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        # Get embeddings
        token_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = self.drop(token_embeds + position_embeds)

        # Pass through the (weight-shared) transformer block n_layer times
        # for block in self.h:
        #     hidden_states = block(hidden_states, attention_mask=attention_mask)
        for _ in range(self.n_layer):
            hidden_states = self.shared_block(hidden_states, attention_mask=attention_mask)

        # Final layer norm
        hidden_states = self.ln_f(hidden_states)

        # Get logits
        logits = self.lm_head(hidden_states)

        # Compute loss if labels are provided
        loss = None
        if labels is not None:
            # Shift logits and labels for next-token prediction
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Flatten for cross-entropy
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            # loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        # Get logits for the last position
        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        # Apply top-k filtering
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Apply top-p (nucleus) filtering
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        # Sample from the distribution
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()


AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
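
# A minimal usage sketch, assuming training data is already tokenized into move IDs;
# the random token IDs below are placeholders rather than a real chess-move vocabulary.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)

    # Training-style call: labels are typically input_ids with padded positions set
    # to -100 so the cross-entropy loss ignores them.
    input_ids = torch.randint(3, config.vocab_size, (2, 16))
    outputs = model(input_ids=input_ids, labels=input_ids)
    print(f"LM loss: {outputs.loss.item():.4f}")

    # Inference-style call: sample one next-move token with temperature and top-k.
    next_move_id = model.generate_move(input_ids[:1], temperature=0.8, top_k=10)
    print(f"Sampled next-move token id: {next_move_id}")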