import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


def rotate_half(x):
    """Split the last dimension in half and rotate: (x1, x2) -> (-x2, x1)."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rope(q, k):
    """Apply rotary position embeddings (RoPE) to query and key tensors of shape
    (batch, heads, seq_len, head_dim)."""
    dim = q.shape[-1]
    device = q.device
    seq_len = q.shape[-2]
    # Inverse frequencies for each pair of channels, as in the RoFormer paper.
    theta = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).float() / dim))
    pos = torch.arange(seq_len, device=device).float()
    freqs = torch.einsum("i,j->ij", pos, theta)  # (seq_len, dim // 2)
    emb = torch.cat((freqs, freqs), dim=-1)      # (seq_len, dim)
    cos = emb.cos()[None, None, :, :]
    sin = emb.sin()[None, None, :, :]
    q = (q * cos) + (rotate_half(q) * sin)
    k = (k * cos) + (rotate_half(k) * sin)
    return q, k
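

# Illustrative helper, not used by the model itself: a quick check of the key RoPE
# property that post-rotation attention scores depend only on the relative offset
# between positions. The function name and defaults are placeholders for this sketch.
def _rope_sanity_check(seq_len=8, dim=16):
    # Use the same vector at every position so any score difference must come from
    # the positional rotation alone.
    q = torch.randn(1, 1, 1, dim).expand(1, 1, seq_len, dim).contiguous()
    k = torch.randn(1, 1, 1, dim).expand(1, 1, seq_len, dim).contiguous()
    q_rot, k_rot = apply_rope(q, k)
    scores = q_rot[0, 0] @ k_rot[0, 0].T  # (seq_len, seq_len)
    # Entries on the same diagonal share the same relative offset, so they should match.
    off_by_one = scores.diagonal(-1)
    assert torch.allclose(off_by_one[0], off_by_one[-1], atol=1e-4)
    return scores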


class ChessConfig(PretrainedConfig):
    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size=1682,
        n_embd=96,
        n_layer=8,
        n_head=8,
        n_ctx=336,
        n_inner=None,
        dropout=0.15,
        layer_norm_epsilon=1e-5,
        tie_weights=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        # Default MLP width: 3.5x the embedding size.
        self.n_inner = n_inner if n_inner is not None else int(3.5 * n_embd)
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        # Keep the standard transformers flag in sync with our own tie_weights option.
        self.tie_word_embeddings = tie_weights


class MultiHeadAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head
        # Fused projection producing queries, keys and values in one matmul.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)
        # Causal mask: position i may only attend to positions <= i.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(1, 1, config.n_ctx, config.n_ctx),
        )

    def forward(self, x, attention_mask=None):
        batch_size, seq_len, _ = x.size()
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        q, k = apply_rope(q, k)
        attn_weights = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        # Mask with the dtype minimum rather than -inf so rows that end up fully
        # masked (padding positions) softmax to a uniform distribution, not NaN.
        mask_value = torch.finfo(attn_weights.dtype).min
        attn_weights = attn_weights.masked_fill(self.bias[:, :, :seq_len, :seq_len] == 0, mask_value)
        if attention_mask is not None:
            attn_weights = attn_weights.masked_fill(
                attention_mask.view(batch_size, 1, 1, seq_len) == 0, mask_value
            )
        attn_weights = F.softmax(attn_weights, dim=-1)
        # Attention dropout (the Dropout module defined above was otherwise unused).
        attn_weights = self.dropout(attn_weights)
        out = (attn_weights @ v).transpose(1, 2).contiguous().view(batch_size, seq_len, self.n_embd)
        return self.c_proj(out)


class FeedForward(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        return self.dropout(self.c_proj(F.gelu(self.c_fc(x))))


class TransformerBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(self, x, attention_mask=None):
        # Pre-LayerNorm residual layout.
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        x = x + self.mlp(self.ln_2(x))
        return x


class ChessForCausalLM(PreTrainedModel):
    config_class = ChessConfig

    def __init__(self, config):
        super().__init__(config)
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.dropout)
        self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.post_init()
        if config.tie_weights:
            # Share the input embedding matrix with the output projection.
            self.lm_head.weight = self.wte.weight

    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
        x = self.drop(self.wte(input_ids))
        for block in self.h:
            x = block(x, attention_mask=attention_mask)
        logits = self.lm_head(self.ln_f(x))
        loss = None
        if labels is not None:
            # Standard causal LM shift: predict token t+1 from positions <= t.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
        return CausalLMOutputWithPast(loss=loss, logits=logits)
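

# Minimal usage sketch (illustrative only): instantiate the model with the default
# configuration and run a single forward pass. The token ids below are random
# placeholders, not real encoded chess moves.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    model.eval()

    # Dummy batch of 2 sequences, 12 tokens each, avoiding the special ids 0-2.
    input_ids = torch.randint(3, config.vocab_size, (2, 12))
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        out = model(input_ids, attention_mask=attention_mask, labels=input_ids)

    print("parameters:", sum(p.numel() for p in model.parameters()))
    print("logits shape:", tuple(out.logits.shape))
    print("loss:", float(out.loss))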