import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig, GenerationMixin
from transformers.modeling_outputs import CausalLMOutput


class ChessConfig(PretrainedConfig):
    model_type = "chess_transformer"

    def __init__(self, vocab_size=1000, n_embd=128, n_layer=4, n_head=4,
                 n_inner=512, n_ctx=256, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.n_ctx = n_ctx
        # Aliases for Hugging Face compatibility (required for .generate())
        self.num_hidden_layers = n_layer
        self.hidden_size = n_embd
        self.num_attention_heads = n_head


class Block(nn.Module):
    """Pre-LayerNorm transformer block: self-attention followed by an MLP."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.attn = nn.MultiheadAttention(config.n_embd, config.n_head, batch_first=True)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, config.n_inner),
            nn.GELU(),
            nn.Linear(config.n_inner, config.n_embd),
        )

    def forward(self, x, mask=None):
        # Normalize once and reuse as query, key, and value (self-attention).
        h = self.ln1(x)
        attn_out, _ = self.attn(h, h, h, attn_mask=mask, need_weights=False)
        x = x + attn_out
        return x + self.mlp(self.ln2(x))


class ChessForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = ChessConfig

    def __init__(self, config):
        super().__init__(config)
        self.token_emb = nn.Embedding(config.vocab_size, config.n_embd)
        self.pos_emb = nn.Embedding(config.n_ctx, config.n_embd)
        self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying: the output projection shares the token embedding matrix.
        self.lm_head.weight = self.token_emb.weight
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                torch.nn.init.zeros_(module.bias)

    # Required for .generate()
    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        return {"input_ids": input_ids}

    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
        B, T = input_ids.shape
        x = self.token_emb(input_ids) + self.pos_emb(torch.arange(T, device=input_ids.device))
        # Causal mask: -inf above the diagonal forbids attending to future tokens.
        mask = torch.triu(
            torch.full((T, T), float("-inf"), device=input_ids.device), diagonal=1
        )
        for block in self.blocks:
            x = block(x, mask=mask)
        logits = self.lm_head(self.ln_f(x))

        loss = None
        if labels is not None:
            # Shift: position t predicts token t+1 (input [BOS, A, B] -> targets [A, B, C]).
            # Because the tokenizer prepends [BOS], this shift is correct.
            loss = nn.CrossEntropyLoss(ignore_index=-100)(
                logits[..., :-1, :].contiguous().view(-1, self.config.vocab_size),
                labels[..., 1:].contiguous().view(-1),
            )
        return CausalLMOutput(loss=loss, logits=logits)
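

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the model code above): the token
    # ids below are placeholders, not the output of a real chess tokenizer.
    config = ChessConfig()
    model = ChessForCausalLM(config)

    input_ids = torch.tensor([[1, 42, 87]])  # hypothetical [BOS] + two move tokens
    out = model(input_ids, labels=input_ids)
    print(out.loss.item(), out.logits.shape)  # scalar loss, torch.Size([1, 3, 1000])

    # Greedy decoding via the inherited GenerationMixin; stays well under n_ctx.
    generated = model.generate(input_ids, max_new_tokens=5, do_sample=False)
    print(generated.shape)  # torch.Size([1, 8])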