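"""A minimal GPT-style causal language model over chess-move tokens, wrapped
in the Hugging Face PreTrainedModel / PretrainedConfig API so it can be used
with transformers utilities such as save_pretrained and from_pretrained."""
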
import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig
class ChessConfig(PretrainedConfig):
    model_type = "chess_transformer"

    def __init__(self, vocab_size=1000, n_embd=128, n_layer=4, n_head=4,
                 n_inner=512, n_ctx=256, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.n_ctx = n_ctx
class Block(nn.Module):
    """Pre-LayerNorm transformer block: self-attention followed by an MLP."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.attn = nn.MultiheadAttention(config.n_embd, config.n_head, batch_first=True)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, config.n_inner),
            nn.GELU(),
            nn.Linear(config.n_inner, config.n_embd),
        )

    def forward(self, x, mask=None, key_padding_mask=None):
        h = self.ln1(x)  # normalize once, reuse as query, key, and value
        attn_out, _ = self.attn(h, h, h, attn_mask=mask,
                                key_padding_mask=key_padding_mask, need_weights=False)
        x = x + attn_out
        return x + self.mlp(self.ln2(x))
class ChessForCausalLM(PreTrainedModel):
    config_class = ChessConfig

    def __init__(self, config):
        super().__init__(config)  # PreTrainedModel already stores config as self.config
        self.token_emb = nn.Embedding(config.vocab_size, config.n_embd)
        self.pos_emb = nn.Embedding(config.n_ctx, config.n_embd)
        self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
        B, T = input_ids.shape
        assert T <= self.config.n_ctx, f"sequence length {T} exceeds n_ctx={self.config.n_ctx}"
        x = self.token_emb(input_ids) + self.pos_emb(torch.arange(T, device=input_ids.device))
        # Boolean causal mask: True above the diagonal blocks attention to future tokens.
        mask = torch.triu(torch.ones(T, T, dtype=torch.bool, device=input_ids.device), diagonal=1)
        # Convert the HF-style attention_mask (1 = real token, 0 = padding) into a
        # key_padding_mask (True = ignore) so padded positions are never attended to.
        key_padding_mask = (attention_mask == 0) if attention_mask is not None else None
        for block in self.blocks:
            x = block(x, mask=mask, key_padding_mask=key_padding_mask)
        logits = self.lm_head(self.ln_f(x))
        loss = None
        if labels is not None:
            # Shift so position t predicts token t + 1; positions labeled -100 are ignored.
            shift_logits = logits[..., :-1, :].contiguous().view(-1, self.config.vocab_size)
            shift_labels = labels[..., 1:].contiguous().view(-1)
            loss = nn.CrossEntropyLoss(ignore_index=-100)(shift_logits, shift_labels)
        return {"loss": loss, "logits": logits}
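
# A minimal smoke test (an illustrative sketch, not part of the original model
# code): the random token ids below are placeholders, not a real encoded game.
# It checks that a forward pass returns logits of shape (B, T, vocab_size) and
# a scalar loss when labels are supplied.
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))  # batch of 2, length 16
    out = model(input_ids, labels=input_ids)
    print(out["logits"].shape)  # expected: torch.Size([2, 16, 1000])
    print(out["loss"])          # scalar cross-entropy over shifted targets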