|
|
""" |
|
|
Chess Transformer Model for the Chess Challenge. |
|
|
|
|
|
This module provides a simple GPT-style transformer architecture |
|
|
designed to fit within the 1M parameter constraint. |
|
|
|
|
|
Key components: |
|
|
- ChessConfig: Configuration class for model hyperparameters |
|
|
- ChessForCausalLM: The main model class for next-move prediction
- MultiHeadAttention / GroupedQueryAttention: Attention variants ("MHA" / "GQA")
- TransformerBlock / FeedForward: Building blocks of the decoder stack
- RotaryEmbedding: Optional rotary position embeddings (config.rot_pos_emb=True)
|
|
""" |
|
|
|
|
|
from __future__ import annotations |
|
|
|
|
|
import math |
|
|
from dataclasses import dataclass |
|
|
from typing import Optional, Tuple, Union |
|
|
|
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
from transformers import PretrainedConfig, PreTrainedModel |
|
|
from transformers.modeling_outputs import CausalLMOutputWithPast |
|
|
|
|
|
|
|
|
class ChessConfig(PretrainedConfig): |
|
|
""" |
|
|
Configuration class for the Chess Transformer model. |
|
|
|
|
|
This configuration is designed for a ~1M parameter model. |
|
|
Students can adjust these values to explore different architectures. |
|
|
|
|
|
    Parameter budget breakdown (with default values):
    - Embeddings (vocab): 1200 x 128 = 153,600
    - Position embeddings: 256 x 128 = 32,768
    - Transformer layers: 6 x ~165,000 = ~992,000
    - LM head (with weight tying): 0 (shared with embeddings)
    - Total: ~1,180,000 parameters with these defaults, so reduce n_layer,
      n_embd, or n_inner if you need to come in under a hard 1M limit
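
    A quick way to verify the total for any configuration (illustrative; the
    model class is defined later in this module, and the shared lm_head weight
    is counted once):

        >>> model = ChessForCausalLM(ChessConfig())
        >>> n_params = sum(p.numel() for p in model.parameters())  # ~1.18M here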
|
|
|
|
|
Attributes: |
|
|
vocab_size: Size of the vocabulary (number of unique moves). |
|
|
n_embd: Embedding dimension (d_model). |
|
|
n_layer: Number of transformer layers. |
|
|
n_head: Number of attention heads. |
|
|
n_ctx: Maximum sequence length (context window). |
|
|
n_inner: Feed-forward inner dimension (default: 3 * n_embd). |
|
|
dropout: Dropout probability. |
|
|
layer_norm_epsilon: Epsilon for layer normalization. |
|
|
        tie_weights: Whether to tie embedding and output weights.
        attn: Attention variant, either "MHA" (multi-head) or "GQA" (grouped-query).
        num_groups: Number of key/value head groups used when attn="GQA".
        rot_pos_emb: Whether to use rotary position embeddings instead of learned ones.
        rotary_base: Base frequency for the rotary position embeddings.
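
    Example (values reflect the defaults above):
        >>> config = ChessConfig()
        >>> config.n_inner  # defaults to 3 * n_embd
        384
        >>> config_gqa = ChessConfig(attn="GQA", num_groups=2, rot_pos_emb=True)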
|
|
""" |
|
|
|
|
|
model_type = "chess_transformer" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
vocab_size: int = 1200, |
|
|
n_embd: int = 128, |
|
|
n_layer: int = 6, |
|
|
n_head: int = 4, |
|
|
n_ctx: int = 256, |
|
|
n_inner: Optional[int] = None, |
|
|
dropout: float = 0.1, |
|
|
layer_norm_epsilon: float = 1e-5, |
|
|
tie_weights: bool = True, |
|
|
pad_token_id: int = 0, |
|
|
bos_token_id: int = 1, |
|
|
eos_token_id: int = 2, |
|
|
attn: str = "MHA", |
|
|
num_groups: int = 2, |
|
|
        rot_pos_emb: bool = False,
|
|
        rotary_base: int = 10000,
|
|
**kwargs, |
|
|
): |
|
|
super().__init__( |
|
|
pad_token_id=pad_token_id, |
|
|
bos_token_id=bos_token_id, |
|
|
eos_token_id=eos_token_id, |
|
|
**kwargs, |
|
|
) |
|
|
|
|
|
self.vocab_size = vocab_size |
|
|
self.n_embd = n_embd |
|
|
self.n_layer = n_layer |
|
|
self.n_head = n_head |
|
|
self.n_ctx = n_ctx |
|
|
self.n_inner = n_inner if n_inner is not None else 3 * n_embd |
|
|
self.dropout = dropout |
|
|
self.layer_norm_epsilon = layer_norm_epsilon |
|
|
self.tie_weights = tie_weights |
|
|
|
|
|
self.tie_word_embeddings = bool(tie_weights) |
|
|
|
|
|
|
|
|
self.attn = attn |
|
|
self.num_groups = num_groups |
|
|
|
|
|
|
|
|
self.rot_pos_emb = rot_pos_emb |
|
|
self.rotary_base = rotary_base |
|
|
|
|
|
|
|
|
|
|
|
class MultiHeadAttention(nn.Module): |
|
|
""" |
|
|
Multi-head self-attention module. |
|
|
|
|
|
This is a standard scaled dot-product attention implementation |
|
|
with causal masking for autoregressive generation. |
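
    Example (shape check, assuming the default ChessConfig):
        >>> attn = MultiHeadAttention(ChessConfig())
        >>> attn(torch.randn(2, 10, 128)).shape
        torch.Size([2, 10, 128])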
|
|
""" |
|
|
|
|
|
def __init__(self, config: ChessConfig): |
|
|
super().__init__() |
|
|
|
|
|
assert config.n_embd % config.n_head == 0, \ |
|
|
f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})" |
|
|
|
|
|
self.n_head = config.n_head |
|
|
self.n_embd = config.n_embd |
|
|
self.head_dim = config.n_embd // config.n_head |
|
|
|
|
|
self.rot_pos_emb = config.rot_pos_emb |
|
|
if self.rot_pos_emb: |
|
|
self.rotary_emb = RotaryEmbedding( |
|
|
self.head_dim, |
|
|
max_position_embeddings=config.n_ctx, |
|
|
base=getattr(config, 'rotary_base', 10000) |
|
|
) |
|
|
|
|
|
|
|
|
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd) |
|
|
self.c_proj = nn.Linear(config.n_embd, config.n_embd) |
|
|
|
|
|
self.dropout = nn.Dropout(config.dropout) |
|
|
|
|
|
|
|
|
self.register_buffer( |
|
|
"bias", |
|
|
torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view( |
|
|
1, 1, config.n_ctx, config.n_ctx |
|
|
), |
|
|
persistent=False, |
|
|
) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
x: torch.Tensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
) -> torch.Tensor: |
|
|
batch_size, seq_len, _ = x.size() |
|
|
|
|
|
|
|
|
qkv = self.c_attn(x) |
|
|
q, k, v = qkv.split(self.n_embd, dim=2) |
|
|
|
|
|
|
|
|
q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2) |
|
|
k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2) |
|
|
v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2) |
|
|
|
|
|
if self.rot_pos_emb: |
|
|
cos, sin = self.rotary_emb(v, seq_len=seq_len) |
|
|
q, k = apply_rotary_pos_emb(q, k, cos, sin) |
|
|
|
|
|
|
|
|
attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim) |
|
|
|
|
|
|
|
|
causal_mask = self.bias[:, :, :seq_len, :seq_len] |
|
|
attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf")) |
|
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
|
|
|
            # (batch, seq_len) -> (batch, 1, 1, seq_len) so the mask broadcasts
            # over heads and query positions
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
|
attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf")) |
|
|
|
|
|
attn_weights = F.softmax(attn_weights, dim=-1) |
|
|
attn_weights = self.dropout(attn_weights) |
|
|
|
|
|
|
|
|
attn_output = torch.matmul(attn_weights, v) |
|
|
|
|
|
|
|
|
attn_output = attn_output.transpose(1, 2).contiguous().view( |
|
|
batch_size, seq_len, self.n_embd |
|
|
) |
|
|
|
|
|
|
|
|
attn_output = self.c_proj(attn_output) |
|
|
|
|
|
return attn_output |
|
|
|
|
|
|
|
|
class FeedForward(nn.Module): |
|
|
""" |
|
|
Feed-forward network (MLP) module. |
|
|
|
|
|
Standard two-layer MLP with GELU activation. |
|
|
""" |
|
|
|
|
|
def __init__(self, config: ChessConfig): |
|
|
super().__init__() |
|
|
|
|
|
self.c_fc = nn.Linear(config.n_embd, config.n_inner) |
|
|
self.c_proj = nn.Linear(config.n_inner, config.n_embd) |
|
|
self.dropout = nn.Dropout(config.dropout) |
|
|
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
|
x = self.c_fc(x) |
|
|
x = F.gelu(x) |
|
|
x = self.c_proj(x) |
|
|
x = self.dropout(x) |
|
|
return x |
|
|
|
|
|
class GroupedQueryAttention(nn.Module): |
|
|
""" |
|
|
Standard Grouped Query Attention |
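
    Example (illustrative; with the defaults n_head=4, num_groups=2, head_dim=32):
        >>> gqa = GroupedQueryAttention(ChessConfig(attn="GQA"))
        >>> gqa.k_proj.weight.shape  # keys project to num_groups * head_dim = 64 dims
        torch.Size([64, 128])
        >>> gqa(torch.randn(2, 10, 128)).shape
        torch.Size([2, 10, 128])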
|
|
|
|
|
""" |
|
|
def __init__(self, config: ChessConfig): |
|
|
super().__init__() |
|
|
|
|
|
        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
        assert config.n_head % config.num_groups == 0, \
            f"n_head ({config.n_head}) must be divisible by num_groups ({config.num_groups})"
|
|
|
|
|
self.n_head = config.n_head |
|
|
self.n_embd = config.n_embd |
|
|
self.head_dim = config.n_embd // config.n_head |
|
|
|
|
|
self.num_groups = config.num_groups |
|
|
self.group_size = config.n_head // config.num_groups |
|
|
|
|
|
self.q_proj = nn.Linear(self.n_embd, self.n_head * self.head_dim) |
|
|
self.k_proj = nn.Linear(self.n_embd, self.num_groups * self.head_dim) |
|
|
self.v_proj = nn.Linear(self.n_embd, self.num_groups * self.head_dim) |
|
|
        self.out_proj = nn.Linear(self.n_head * self.head_dim, self.n_embd)
        self.dropout = nn.Dropout(config.dropout)
|
|
|
|
|
|
|
|
self.register_buffer( |
|
|
"bias", |
|
|
torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view( |
|
|
1, 1, config.n_ctx, config.n_ctx |
|
|
), |
|
|
persistent=False, |
|
|
) |
|
|
def forward( |
|
|
self, |
|
|
x: torch.Tensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
) -> torch.Tensor: |
|
|
batch_size, seq_len, _ = x.size() |
|
|
|
|
|
|
|
|
q = self.q_proj(x).view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2) |
|
|
k = self.k_proj(x).view(batch_size, seq_len, self.num_groups, self.head_dim).transpose(1, 2) |
|
|
v = self.v_proj(x).view(batch_size, seq_len, self.num_groups, self.head_dim).transpose(1, 2) |
|
|
|
|
|
|
|
|
        # Expand K and V from num_groups heads to n_head heads so each query
        # head attends with the K/V of its group
        k = k.repeat_interleave(self.group_size, dim=1)
        v = v.repeat_interleave(self.group_size, dim=1)
|
|
|
|
|
|
|
|
attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(self.head_dim) |
|
|
|
|
|
causal_mask = self.bias[:, :, :seq_len, :seq_len] |
|
|
attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf")) |
|
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
|
|
|
            # (batch, seq_len) -> (batch, 1, 1, seq_len) for broadcasting
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
|
attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf")) |
|
|
|
|
|
attn_weights = F.softmax(attn_weights, dim=-1) |
|
|
        attn_weights = self.dropout(attn_weights)
|
|
|
|
|
|
|
|
context = torch.matmul(attn_weights, v).transpose(1, 2).contiguous() |
|
|
context = context.view(batch_size, seq_len, self.n_embd) |
|
|
|
|
|
return self.out_proj(context) |
|
|
|
|
|
|
|
|
class TransformerBlock(nn.Module): |
|
|
""" |
|
|
A single transformer block with attention and feed-forward layers. |
|
|
|
|
|
Uses pre-normalization (LayerNorm before attention/FFN) for better |
|
|
training stability. |
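
    Example (shape-preserving; assumes the default ChessConfig):
        >>> block = TransformerBlock(ChessConfig())
        >>> block(torch.randn(1, 8, 128)).shape
        torch.Size([1, 8, 128])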
|
|
""" |
|
|
|
|
|
def __init__(self, config: ChessConfig): |
|
|
super().__init__() |
|
|
|
|
|
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) |
|
|
if config.attn == "MHA": |
|
|
self.attn = MultiHeadAttention(config) |
|
|
elif config.attn == "GQA": |
|
|
self.attn = GroupedQueryAttention(config) |
|
|
else: |
|
|
raise ValueError(f"config.attn expected either MHA or GQA, got {config.attn}") |
|
|
|
|
|
self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) |
|
|
self.mlp = FeedForward(config) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
x: torch.Tensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
) -> torch.Tensor: |
|
|
|
|
|
x = x + self.attn(self.ln_1(x), attention_mask=attention_mask) |
|
|
|
|
|
x = x + self.mlp(self.ln_2(x)) |
|
|
return x |
|
|
|
|
|
|
|
|
class ChessForCausalLM(PreTrainedModel): |
|
|
""" |
|
|
Chess Transformer for Causal Language Modeling (next-move prediction). |
|
|
|
|
|
This model is designed to predict the next chess move given a sequence |
|
|
of previous moves. It uses a GPT-style architecture with: |
|
|
- Token embeddings for chess moves |
|
|
- Learned positional embeddings |
|
|
- Stacked transformer blocks |
|
|
- Linear head for next-token prediction |
|
|
|
|
|
The model supports weight tying between the embedding layer and the |
|
|
output projection to save parameters. |
|
|
|
|
|
Example: |
|
|
>>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6) |
|
|
>>> model = ChessForCausalLM(config) |
|
|
>>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])} |
|
|
>>> outputs = model(**inputs) |
|
|
>>> next_move_logits = outputs.logits[:, -1, :] |
|
|
""" |
|
|
|
|
|
config_class = ChessConfig |
|
|
base_model_prefix = "transformer" |
|
|
supports_gradient_checkpointing = True |
|
|
|
|
|
keys_to_ignore_on_load_missing = ["lm_head.weight"] |
|
|
|
|
|
def __init__(self, config: ChessConfig): |
|
|
super().__init__(config) |
|
|
|
|
|
|
|
|
self.wte = nn.Embedding(config.vocab_size, config.n_embd) |
|
|
if not config.rot_pos_emb: |
|
|
self.wpe = nn.Embedding(config.n_ctx, config.n_embd) |
|
|
self.rot_pos_emb = config.rot_pos_emb |
|
|
|
|
|
self.drop = nn.Dropout(config.dropout) |
|
|
|
|
|
|
|
|
self.h = nn.ModuleList([ |
|
|
TransformerBlock(config) for _ in range(config.n_layer) |
|
|
]) |
|
|
|
|
|
|
|
|
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) |
|
|
|
|
|
|
|
|
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
|
|
|
|
|
|
|
|
if config.tie_weights: |
|
|
self._tied_weights_keys = ["lm_head.weight"] |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
|
|
|
if config.tie_weights: |
|
|
self.tie_weights() |
|
|
|
|
|
def get_input_embeddings(self) -> nn.Module: |
|
|
return self.wte |
|
|
|
|
|
def set_input_embeddings(self, new_embeddings: nn.Module): |
|
|
self.wte = new_embeddings |
|
|
if getattr(self.config, "tie_weights", False): |
|
|
self.tie_weights() |
|
|
|
|
|
def get_output_embeddings(self) -> nn.Module: |
|
|
return self.lm_head |
|
|
|
|
|
def set_output_embeddings(self, new_embeddings: nn.Module): |
|
|
self.lm_head = new_embeddings |
|
|
|
|
|
def tie_weights(self): |
|
|
|
|
|
if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False): |
|
|
self._tie_or_clone_weights(self.lm_head, self.wte) |
|
|
|
|
|
def _init_weights(self, module: nn.Module): |
|
|
"""Initialize weights following GPT-2 style.""" |
|
|
if isinstance(module, nn.Linear): |
|
|
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) |
|
|
if module.bias is not None: |
|
|
torch.nn.init.zeros_(module.bias) |
|
|
elif isinstance(module, nn.Embedding): |
|
|
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) |
|
|
elif isinstance(module, nn.LayerNorm): |
|
|
torch.nn.init.ones_(module.weight) |
|
|
torch.nn.init.zeros_(module.bias) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: torch.LongTensor, |
|
|
attention_mask: Optional[torch.Tensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
labels: Optional[torch.LongTensor] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
**kwargs, |
|
|
) -> Union[Tuple, CausalLMOutputWithPast]: |
|
|
""" |
|
|
Forward pass of the model. |
|
|
|
|
|
Args: |
|
|
input_ids: Token IDs of shape (batch_size, seq_len). |
|
|
attention_mask: Attention mask of shape (batch_size, seq_len). |
|
|
position_ids: Position IDs of shape (batch_size, seq_len). |
|
|
labels: Labels for language modeling loss. |
|
|
return_dict: Whether to return a ModelOutput object. |
|
|
|
|
|
Returns: |
|
|
CausalLMOutputWithPast containing loss (if labels provided) and logits. |
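
        Example (assuming model = ChessForCausalLM(ChessConfig())):
            >>> ids = torch.tensor([[1, 42, 87, 2]])
            >>> out = model(input_ids=ids, labels=ids)
            >>> out.logits.shape
            torch.Size([1, 4, 1200])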
|
|
""" |
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
batch_size, seq_len = input_ids.size() |
|
|
device = input_ids.device |
|
|
|
|
|
|
|
|
if position_ids is None: |
|
|
position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1) |
|
|
|
|
|
|
|
|
token_embeds = self.wte(input_ids) |
|
|
if not self.rot_pos_emb: |
|
|
position_embeds = self.wpe(position_ids) |
|
|
hidden_states = self.drop(token_embeds + position_embeds) |
|
|
else: |
|
|
hidden_states = self.drop(token_embeds) |
|
|
|
|
|
|
|
|
for block in self.h: |
|
|
hidden_states = block(hidden_states, attention_mask=attention_mask) |
|
|
|
|
|
|
|
|
hidden_states = self.ln_f(hidden_states) |
|
|
|
|
|
|
|
|
logits = self.lm_head(hidden_states) |
|
|
|
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
|
|
|
            # Shift so the token at position i is used to predict the token at i + 1
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
|
|
|
|
|
|
|
|
loss_fct = nn.CrossEntropyLoss(ignore_index=-100) |
|
|
|
|
|
loss = loss_fct( |
|
|
shift_logits.view(-1, shift_logits.size(-1)), |
|
|
shift_labels.view(-1), |
|
|
) |
|
|
|
|
|
if not return_dict: |
|
|
output = (logits,) |
|
|
return ((loss,) + output) if loss is not None else output |
|
|
|
|
|
return CausalLMOutputWithPast( |
|
|
loss=loss, |
|
|
logits=logits, |
|
|
past_key_values=None, |
|
|
hidden_states=None, |
|
|
attentions=None, |
|
|
) |
|
|
|
|
|
@torch.no_grad() |
|
|
def generate_move( |
|
|
self, |
|
|
input_ids: torch.LongTensor, |
|
|
temperature: float = 1.0, |
|
|
top_k: Optional[int] = None, |
|
|
top_p: Optional[float] = None, |
|
|
) -> int: |
|
|
""" |
|
|
Generate the next move given a sequence of moves. |
|
|
|
|
|
Args: |
|
|
input_ids: Token IDs of shape (1, seq_len). |
|
|
temperature: Sampling temperature (1.0 = no change). |
|
|
top_k: If set, only sample from top k tokens. |
|
|
top_p: If set, use nucleus sampling with this threshold. |
|
|
|
|
|
Returns: |
|
|
The token ID of the predicted next move. |
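
        Example (illustrative; assumes a trained model and a tokenized move prefix):
            >>> next_id = model.generate_move(torch.tensor([[1, 42, 87]]), temperature=0.8, top_k=10)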
|
|
""" |
|
|
self.eval() |
|
|
|
|
|
|
|
|
outputs = self(input_ids) |
|
|
logits = outputs.logits[:, -1, :] / temperature |
|
|
|
|
|
|
|
|
if top_k is not None: |
|
|
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] |
|
|
logits[indices_to_remove] = float("-inf") |
|
|
|
|
|
|
|
|
if top_p is not None: |
|
|
sorted_logits, sorted_indices = torch.sort(logits, descending=True) |
|
|
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) |
|
|
|
|
|
|
|
|
sorted_indices_to_remove = cumulative_probs > top_p |
|
|
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() |
|
|
sorted_indices_to_remove[..., 0] = 0 |
|
|
|
|
|
indices_to_remove = sorted_indices_to_remove.scatter( |
|
|
dim=-1, index=sorted_indices, src=sorted_indices_to_remove |
|
|
) |
|
|
logits[indices_to_remove] = float("-inf") |
|
|
|
|
|
|
|
|
probs = F.softmax(logits, dim=-1) |
|
|
next_token = torch.multinomial(probs, num_samples=1) |
|
|
|
|
|
return next_token.item() |
|
|
|
|
|
class RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) with a precomputed cos/sin cache.

    The cache covers max_position_embeddings positions and is extended lazily
    if a longer sequence is encountered at run time.
    """
|
|
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): |
|
|
super().__init__() |
|
|
self.dim = dim |
|
|
self.max_position_embeddings = max_position_embeddings |
|
|
self.base = base |
|
|
inv_freq = 1.0 / (self.base ** (torch.arange(0, dim, 2).float().to(device) / dim)) |
|
|
self.register_buffer("inv_freq", inv_freq, persistent=False) |
|
|
|
|
|
|
|
|
self._set_cos_sin_cache( |
|
|
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() |
|
|
) |
|
|
|
|
|
def _set_cos_sin_cache(self, seq_len, device, dtype): |
|
|
self.max_seq_len_cached = seq_len |
|
|
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) |
|
|
|
|
|
freqs = torch.outer(t, self.inv_freq) |
|
|
|
|
|
emb = torch.cat((freqs, freqs), dim=-1) |
|
|
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) |
|
|
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) |
|
|
|
|
|
def forward(self, x, seq_len=None): |
|
|
|
|
|
        if seq_len is None:
            seq_len = x.shape[-2]
        if seq_len > self.max_seq_len_cached:
|
|
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) |
|
|
|
|
|
return ( |
|
|
self.cos_cached[:seq_len].to(dtype=x.dtype), |
|
|
self.sin_cached[:seq_len].to(dtype=x.dtype), |
|
|
) |
|
|
|
|
|
def rotate_half(x): |
|
|
"""Rotates half the hidden dims of the input.""" |
|
|
x1 = x[..., : x.shape[-1] // 2] |
|
|
x2 = x[..., x.shape[-1] // 2 :] |
|
|
return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
|
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None):
    """Apply rotary position embeddings to query and key tensors.

    q and k have shape (batch, n_head, seq_len, head_dim); cos and sin have
    shape (seq_len, head_dim) and are broadcast over the batch and head dims.
    position_ids is accepted for API compatibility but unused here.
    """
    cos = cos.unsqueeze(0).unsqueeze(0)  # (1, 1, seq_len, head_dim)
    sin = sin.unsqueeze(0).unsqueeze(0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_embed = (q * cos) + (rotate_half(q) * sin) |
|
|
k_embed = (k * cos) + (rotate_half(k) * sin) |
|
|
return q_embed, k_embed |
|
|
|
|
|
|
|
|
# Register with the Hugging Face Auto classes so that AutoConfig and
# AutoModelForCausalLM resolve the "chess_transformer" model_type to these classes.
from transformers import AutoConfig, AutoModelForCausalLM
|
|
|
|
|
AutoConfig.register("chess_transformer", ChessConfig) |
|
|
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM) |
|
|
|