import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithPast


class ChessConfig(PretrainedConfig):
    model_type = "chess_lm"

    def __init__(self, vocab_size=1350, n_positions=256, n_embd=128, n_layer=4,
                 n_head=4, n_ctx=256, tie_word_embeddings=True, **kwargs):
        self.vocab_size, self.n_positions, self.n_embd = vocab_size, n_positions, n_embd
        self.n_layer, self.n_head, self.n_ctx = n_layer, n_head, n_ctx
        # Forward tie_word_embeddings to PretrainedConfig; assigning it only as an
        # attribute here would be overwritten by the base class default (True).
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ChessForCausalLM(PreTrainedModel):
    config_class = ChessConfig

    def __init__(self, config):
        super().__init__(config)
        self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd)
        self.position_embedding = nn.Embedding(config.n_positions, config.n_embd)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=config.n_embd,
            nhead=config.n_head,
            dim_feedforward=config.n_embd * 4,
            batch_first=True,
            norm_first=True,
        )
        self.blocks = nn.TransformerEncoder(encoder_layer, num_layers=config.n_layer)
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # post_init() ties head.weight to token_embedding.weight (via the
        # output-embedding accessors below) when config.tie_word_embeddings is True,
        # and runs the usual Hugging Face initialization hooks.
        self.post_init()

    def get_input_embeddings(self):
        return self.token_embedding

    def set_input_embeddings(self, value):
        self.token_embedding = value

    def get_output_embeddings(self):
        return self.head

    def set_output_embeddings(self, new_embeddings):
        self.head = new_embeddings

    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
        B, T = input_ids.shape
        positions = torch.arange(T, device=input_ids.device)
        x = self.token_embedding(input_ids) + self.position_embedding(positions)

        # Additive causal mask: -inf above the diagonal, 0 on and below it.
        causal_mask = torch.triu(
            torch.full((T, T), float("-inf"), device=input_ids.device), diagonal=1
        )
        # Exclude padded positions from attention (attention_mask: 1 = keep, 0 = pad).
        padding_mask = (attention_mask == 0) if attention_mask is not None else None

        x = self.blocks(x, mask=causal_mask, src_key_padding_mask=padding_mask,
                        is_causal=True)
        x = self.ln_f(x)
        logits = self.head(x)

        loss = None
        if labels is not None:
            # Shift so the token at position t predicts the token at position t + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = nn.CrossEntropyLoss()(
                shift_logits.view(-1, self.config.vocab_size),
                shift_labels.view(-1),
            )
        return CausalLMOutputWithPast(loss=loss, logits=logits)
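

# ---------------------------------------------------------------------------
# Minimal smoke test (a sketch, not part of the model definition): builds a
# ChessForCausalLM with the default config, runs one forward pass on random
# token ids, and checks the logits shape and that a loss comes back when
# labels are supplied. The batch size, sequence length, and random inputs
# below are illustrative assumptions, not values from the original source.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = ChessConfig()
    model = ChessForCausalLM(config)
    model.eval()

    batch_size, seq_len = 2, 32
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)

    with torch.no_grad():
        out = model(input_ids, attention_mask=attention_mask, labels=input_ids)

    print(out.logits.shape)  # expected: torch.Size([2, 32, 1350])
    print(out.loss)          # scalar cross-entropy over the shifted targets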