Chess Challenge submission by Bnz94

- README.md +31 -0
- config.json +28 -0
- model.py +484 -0
- model.safetensors +3 -0
- special_tokens_map.json +6 -0
- tokenizer.py +131 -0
- tokenizer_config.json +50 -0
- training_args.bin +3 -0
- vocab.json +91 -0
README.md
ADDED
@@ -0,0 +1,31 @@
---
library_name: transformers
tags:
- chess
- llm-course
- chess-challenge
license: mit
---

# chess-model-bnz

Chess model submitted to the LLM Course Chess Challenge.

## Submission Info

- **Submitted by**: [Bnz94](https://huggingface.co/Bnz94)
- **Parameters**: 996,096
- **Organization**: LLM-course

## Usage

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("LLM-course/chess-model-bnz", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("LLM-course/chess-model-bnz", trust_remote_code=True)
```

## Evaluation

This model is evaluated at the [Chess Challenge Arena](https://huggingface.co/spaces/LLM-course/Chess1MChallenge).
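Beyond loading, a minimal move-sampling loop might look like the sketch below. It builds on the loading snippet above and uses the `generate_move` helper defined in model.py; the game history string and sampling parameters are purely illustrative.

```python
# Hedged sketch: encode the moves played so far, then sample one next token.
history = "WPe2e4 BPe7e5 WNg1f3"  # extended-UCI history (see tokenizer.py)
inputs = tokenizer(history, return_tensors="pt")

next_id = model.generate_move(inputs["input_ids"], temperature=0.7, top_k=10)
print(tokenizer.convert_ids_to_tokens([next_id]))  # e.g. ['[BN]'] (a side+piece token)
```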
config.json
ADDED
@@ -0,0 +1,28 @@
{
  "architectures": [
    "ChessForCausalLM"
  ],
  "bos_token_id": 1,
  "dropout": 0.1,
  "dtype": "float32",
  "eos_token_id": 2,
  "layer_norm_epsilon": 1e-06,
  "model_type": "chess_transformer",
  "n_ctx": 256,
  "n_embd": 128,
  "n_head": 4,
  "n_inner": 256,
  "n_kv_head": 4,
  "n_layer": 6,
  "pad_token_id": 0,
  "rms_norm_epsilon": 1e-06,
  "rope_theta": 10000.0,
  "tie_weights": true,
  "transformers_version": "4.57.6",
  "use_rope": true,
  "vocab_size": 89,
  "auto_map": {
    "AutoConfig": "model.ChessConfig",
    "AutoModelForCausalLM": "model.ChessForCausalLM"
  }
}
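These hyperparameters reproduce the README's 996,096-parameter figure exactly. A back-of-the-envelope check (a sketch; it assumes the tied embedding/lm_head is counted once and all Linear layers are bias-free, as in model.py):

```python
n_embd, n_inner, n_layer, vocab_size = 128, 256, 6, 89

embed = vocab_size * n_embd            # 11,392 (shared with lm_head via tying)
attn  = 4 * n_embd * n_embd            # q/k/v/o projections: 65,536
mlp   = 3 * n_embd * n_inner           # gate/up/down projections: 98,304
norms = 2 * n_embd                     # two RMSNorm weights per block: 256

total = embed + n_layer * (attn + mlp + norms) + n_embd  # + final RMSNorm
print(total)  # 996096
```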
model.py
ADDED
@@ -0,0 +1,484 @@
"""
Chess Transformer Model for the Chess Challenge.
This module provides a modern transformer architecture with:
- RoPE (Rotary Position Embeddings)
- SwiGLU activation
- RMSNorm

Designed to fit within the 1M parameter constraint.
"""

from __future__ import annotations

import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    Uses modern architecture choices:
    - RoPE: No learned position embeddings (saves n_ctx * n_embd params)
    - SwiGLU: 3 matrices instead of 2, but more expressive
    - RMSNorm: Simpler and faster than LayerNorm
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        n_kv_head: Optional[int] = None,  # For GQA; None = MHA
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        rms_norm_epsilon: float = 1e-6,
        tie_weights: bool = True,
        use_rope: bool = True,
        rope_theta: float = 10000.0,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_kv_head = n_kv_head if n_kv_head is not None else n_head
        self.n_ctx = n_ctx
        # SwiGLU typically uses 2/3 * 4 * n_embd, rounded to a multiple of 64
        self.n_inner = n_inner if n_inner is not None else self._compute_swiglu_dim(n_embd)
        self.dropout = dropout
        self.rms_norm_epsilon = rms_norm_epsilon
        self.tie_weights = tie_weights
        self.tie_word_embeddings = bool(tie_weights)
        self.use_rope = use_rope
        self.rope_theta = rope_theta
        # For compatibility with src/utils.py parameter estimation
        self.layer_norm_epsilon = rms_norm_epsilon

    @staticmethod
    def _compute_swiglu_dim(n_embd: int) -> int:
        """Compute the SwiGLU hidden dimension (typically 8/3 * n_embd, rounded)."""
        # Standard SwiGLU uses a ~2.67x multiplier
        hidden = int(8 * n_embd / 3)
        # Round up to a multiple of 64 for efficiency (optional)
        return ((hidden + 63) // 64) * 64


class RMSNorm(nn.Module):
    """
    Root Mean Square Layer Normalization.

    Simpler and faster than LayerNorm - no mean centering, no bias.
    """

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # RMSNorm: x * weight / sqrt(mean(x^2) + eps)
        norm = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return x * norm * self.weight


class RotaryEmbedding(nn.Module):
    """
    Rotary Position Embeddings (RoPE).

    Encodes position information directly into the attention computation
    without learnable parameters.
    """

    def __init__(self, dim: int, max_seq_len: int = 512, theta: float = 10000.0):
        super().__init__()
        self.dim = dim
        self.max_seq_len = max_seq_len
        self.theta = theta

        # Precompute frequencies
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Precompute cos/sin cache
        self._build_cache(max_seq_len)

    def _build_cache(self, seq_len: int):
        """Build the cos/sin cache for positions."""
        t = torch.arange(seq_len, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq)
        # Concatenate to get the full dim
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos(), persistent=False)
        self.register_buffer("sin_cached", emb.sin(), persistent=False)

    def forward(self, seq_len: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return cos and sin for the given sequence length."""
        if seq_len > self.max_seq_len:
            self._build_cache(seq_len)
            self.max_seq_len = seq_len

        return (
            self.cos_cached[:seq_len].to(device),
            self.sin_cached[:seq_len].to(device),
        )


def rotate_half(x: torch.Tensor) -> torch.Tensor:
    """Rotate half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(
    q: torch.Tensor,
    k: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Apply rotary position embeddings to query and key tensors."""
    # q, k: (batch, n_head, seq_len, head_dim)
    # cos, sin: (seq_len, head_dim)
    cos = cos.unsqueeze(0).unsqueeze(0)  # (1, 1, seq_len, head_dim)
    sin = sin.unsqueeze(0).unsqueeze(0)

    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)

    return q_embed, k_embed


class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention with RoPE.

    Supports Grouped Query Attention (GQA) when n_kv_head < n_head.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0

        self.n_head = config.n_head
        self.n_kv_head = config.n_kv_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head
        self.n_rep = config.n_head // config.n_kv_head  # For GQA

        # Separate Q, K, V projections for clarity with GQA
        self.q_proj = nn.Linear(config.n_embd, config.n_head * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.n_embd, config.n_kv_head * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.n_embd, config.n_kv_head * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.n_head * self.head_dim, config.n_embd, bias=False)

        self.dropout = nn.Dropout(config.dropout)

        # RoPE
        self.rotary_emb = RotaryEmbedding(
            self.head_dim,
            max_seq_len=config.n_ctx,
            theta=config.rope_theta,
        )

        # Causal mask
        self.register_buffer(
            "causal_mask",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def _repeat_kv(self, x: torch.Tensor) -> torch.Tensor:
        """Repeat KV heads for GQA."""
        if self.n_rep == 1:
            return x
        batch, n_kv_head, seq_len, head_dim = x.shape
        x = x[:, :, None, :, :].expand(batch, n_kv_head, self.n_rep, seq_len, head_dim)
        return x.reshape(batch, n_kv_head * self.n_rep, seq_len, head_dim)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Project Q, K, V
        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)

        # Reshape for attention
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_kv_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_kv_head, self.head_dim).transpose(1, 2)

        # Apply RoPE
        cos, sin = self.rotary_emb(seq_len, x.device)
        q, k = apply_rotary_pos_emb(q, k, cos, sin)

        # Repeat KV for GQA
        k = self._repeat_kv(k)
        v = self._repeat_kv(v)

        # Scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Apply causal mask
        causal_mask = self.causal_mask[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Apply padding mask
        if attention_mask is not None:
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values
        attn_output = torch.matmul(attn_weights, v)

        # Reshape and project output
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )
        attn_output = self.o_proj(attn_output)

        return attn_output


class SwiGLU(nn.Module):
    """
    SwiGLU Feed-Forward Network.

    SwiGLU(x) = (SiLU(x W_gate) * x W_up) W_down

    More expressive than a standard FFN with a similar parameter count.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        hidden_dim = config.n_inner

        # Gate and up projections (can be fused for efficiency)
        self.gate_proj = nn.Linear(config.n_embd, hidden_dim, bias=False)
        self.up_proj = nn.Linear(config.n_embd, hidden_dim, bias=False)
        self.down_proj = nn.Linear(hidden_dim, config.n_embd, bias=False)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # SwiGLU: SiLU(gate) * up, then down
        gate = F.silu(self.gate_proj(x))
        up = self.up_proj(x)
        x = gate * up
        x = self.down_proj(x)
        x = self.dropout(x)
        return x


class TransformerBlock(nn.Module):
    """
    Transformer block with RMSNorm, RoPE attention, and SwiGLU FFN.

    Uses pre-normalization for training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = RMSNorm(config.n_embd, eps=config.rms_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = RMSNorm(config.n_embd, eps=config.rms_norm_epsilon)
        self.mlp = SwiGLU(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pre-norm attention with residual
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        # Pre-norm FFN with residual
        x = x + self.mlp(self.ln_2(x))
        return x


class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling.

    Modern architecture with RoPE, SwiGLU, and RMSNorm.
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _tied_weights_keys = ["lm_head.weight"]
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token embeddings (no position embeddings - using RoPE)
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)

        self.drop = nn.Dropout(config.dropout)

        # Transformer blocks
        self.h = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.n_layer)
        ])

        # Final RMSNorm
        self.ln_f = RMSNorm(config.n_embd, eps=config.rms_norm_epsilon)

        # Output head
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Initialize weights
        self.post_init()

        # Tie weights if configured
        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights."""
        std = 0.02
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
        elif isinstance(module, RMSNorm):
            torch.nn.init.ones_(module.weight)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Get token embeddings (no position embeddings - RoPE handles position)
        hidden_states = self.wte(input_ids)
        hidden_states = self.drop(hidden_states)

        # Pass through transformer blocks
        for block in self.h:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        # Final norm and head
        hidden_states = self.ln_f(hidden_states)
        logits = self.lm_head(hidden_states)

        # Compute loss if labels provided
        loss = None
        if labels is not None:
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """Generate the next move token."""
        self.eval()

        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0
            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()


# Register with the Auto classes
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
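As a quick smoke test (a sketch, run from the repository root so `model.py` is importable), the model can be instantiated with the shipped hyperparameters and fed a dummy batch:

```python
import torch
from model import ChessConfig, ChessForCausalLM

# vocab_size in config.json (89) differs from the class default (1200),
# so pass the shipped values explicitly.
config = ChessConfig(vocab_size=89, n_embd=128, n_layer=6, n_head=4, n_inner=256)
model = ChessForCausalLM(config)

print(sum(p.numel() for p in model.parameters()))  # 996096 (tied weights counted once)

ids = torch.randint(0, config.vocab_size, (1, 16))
out = model(ids, labels=ids)
print(out.logits.shape, float(out.loss))  # torch.Size([1, 16, 89]) and a scalar loss
```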
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72beeff5f878a0adafff7eed5ce9f82349cdc8098d9d97a2d89a4d2221b97fa7
size 3989408
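The pointer size is consistent with the parameter count: 996,096 float32 weights occupy 996,096 × 4 = 3,984,384 bytes, and the remaining ~5 KB of the 3,989,408-byte file is presumably the safetensors header (with tied weights, the shared embedding/lm_head matrix is stored once).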
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
{
  "bos_token": "[BOS]",
  "eos_token": "[EOS]",
  "pad_token": "[PAD]",
  "unk_token": "[UNK]"
}
tokenizer.py
ADDED
@@ -0,0 +1,131 @@
from __future__ import annotations

import json, os, re
from typing import Dict, List, Optional
from transformers import PreTrainedTokenizer

# Extended UCI: WPe2e4, BNg8f6(x+*), promotions "=Q", castling "(o)/(O)"
_MOVE_RE = re.compile(r"^(?P<side>[WB])(?P<piece>[PNBRQK])(?P<src>[a-h][1-8])(?P<dst>[a-h][1-8])(?P<suffix>.*)$")
_PROMO_RE = re.compile(r"=([QRBNqrbn])")

def _parse_suffix(suffix: str):
    s = (suffix or "").strip()
    is_capture = "x" in s
    is_check = "+" in s
    is_mate = "*" in s
    castle = "O-O-O" if "(O)" in s else ("O-O" if "(o)" in s else None)
    promo = None
    m = _PROMO_RE.search(s)
    if m:
        promo = m.group(1).lower()
    return is_capture, is_check, is_mate, castle, promo

class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(self, vocab_file: Optional[str] = None, vocab: Optional[Dict[str, int]] = None, **kwargs):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def _create_default_vocab(self) -> Dict[str, int]:
        tokens: List[str] = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        # Side+piece tokens (12)
        tokens += [f"[W{p}]" for p in "PNBRQK"]
        tokens += [f"[B{p}]" for p in "PNBRQK"]
        # 64 squares
        tokens += [f"[{f}{r}]" for f in "abcdefgh" for r in "12345678"]
        # Flags / castles / promotions
        tokens += ["[x]", "[+]", "[#]", "[O-O]", "[O-O-O]"]
        tokens += [f"[={p}]" for p in "qrbn"]
        return {tok: i for i, tok in enumerate(tokens)}

    def _tokenize(self, text: str) -> List[str]:
        out: List[str] = []
        for move in (text or "").strip().split():
            # Raw UCI like e2e4 / e7e8q (no side/piece available)
            if re.fullmatch(r"[a-h][1-8][a-h][1-8][qrbn]?", move):
                src, dst = move[:2], move[2:4]
                out += [f"[{src}]", f"[{dst}]"]
                if len(move) == 5:
                    out += [f"[={move[4]}]"]
                continue

            m = _MOVE_RE.match(move)
            if not m:
                out.append(self.UNK_TOKEN)
                continue

            side = m.group("side")    # "W" or "B"
            piece = m.group("piece")  # P/N/B/R/Q/K
            src = f"[{m.group('src')}]"
            dst = f"[{m.group('dst')}]"
            is_cap, is_chk, is_mate, castle, promo = _parse_suffix(m.group("suffix") or "")

            out += [f"[{side}{piece}]", src, dst]
            if castle:
                out.append(f"[{castle}]")
            if is_cap:
                out.append("[x]")
            if is_mate:
                out.append("[#]")
            elif is_chk:
                out.append("[+]")
            if promo:
                out.append(f"[={promo}]")
        return out

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
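A small encoding sketch (illustrative moves; ids follow vocab.json below): each move expands to a side+piece token, the source and destination squares, then any flag tokens.

```python
from tokenizer import ChessTokenizer

tok = ChessTokenizer()  # falls back to the 89-token default vocab

# Extended-UCI moves, the last with capture and check flags:
print(tok.tokenize("WPe2e4 BNg8f6 WQd1h5x+"))
# ['[WP]', '[e2]', '[e4]', '[BN]', '[g8]', '[f6]', '[WQ]', '[d1]', '[h5]', '[x]', '[+]']

# Raw UCI (no side/piece info) with a promotion:
print(tok.tokenize("e7e8q"))
# ['[e7]', '[e8]', '[=q]']
```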
tokenizer_config.json
ADDED
@@ -0,0 +1,50 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "[BOS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "[EOS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "[BOS]",
  "clean_up_tokenization_spaces": false,
  "eos_token": "[EOS]",
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "tokenizer_class": "ChessTokenizer",
  "unk_token": "[UNK]",
  "auto_map": {
    "AutoTokenizer": [
      "tokenizer.ChessTokenizer",
      null
    ]
  }
}
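The `auto_map` entry is what lets `AutoTokenizer.from_pretrained(..., trust_remote_code=True)` resolve the custom `ChessTokenizer` from tokenizer.py; the `null` second element means no fast (Rust) tokenizer is provided.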
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f79be9a6b9ab0e70e9325e38d9ad07047c8c571ed7101f4bfd4b65b7cc784561
size 5841
vocab.json
ADDED
@@ -0,0 +1,91 @@
{
  "[PAD]": 0,
  "[BOS]": 1,
  "[EOS]": 2,
  "[UNK]": 3,
  "[WP]": 4,
  "[WN]": 5,
  "[WB]": 6,
  "[WR]": 7,
  "[WQ]": 8,
  "[WK]": 9,
  "[BP]": 10,
  "[BN]": 11,
  "[BB]": 12,
  "[BR]": 13,
  "[BQ]": 14,
  "[BK]": 15,
  "[a1]": 16,
  "[a2]": 17,
  "[a3]": 18,
  "[a4]": 19,
  "[a5]": 20,
  "[a6]": 21,
  "[a7]": 22,
  "[a8]": 23,
  "[b1]": 24,
  "[b2]": 25,
  "[b3]": 26,
  "[b4]": 27,
  "[b5]": 28,
  "[b6]": 29,
  "[b7]": 30,
  "[b8]": 31,
  "[c1]": 32,
  "[c2]": 33,
  "[c3]": 34,
  "[c4]": 35,
  "[c5]": 36,
  "[c6]": 37,
  "[c7]": 38,
  "[c8]": 39,
  "[d1]": 40,
  "[d2]": 41,
  "[d3]": 42,
  "[d4]": 43,
  "[d5]": 44,
  "[d6]": 45,
  "[d7]": 46,
  "[d8]": 47,
  "[e1]": 48,
  "[e2]": 49,
  "[e3]": 50,
  "[e4]": 51,
  "[e5]": 52,
  "[e6]": 53,
  "[e7]": 54,
  "[e8]": 55,
  "[f1]": 56,
  "[f2]": 57,
  "[f3]": 58,
  "[f4]": 59,
  "[f5]": 60,
  "[f6]": 61,
  "[f7]": 62,
  "[f8]": 63,
  "[g1]": 64,
  "[g2]": 65,
  "[g3]": 66,
  "[g4]": 67,
  "[g5]": 68,
  "[g6]": 69,
  "[g7]": 70,
  "[g8]": 71,
  "[h1]": 72,
  "[h2]": 73,
  "[h3]": 74,
  "[h4]": 75,
  "[h5]": 76,
  "[h6]": 77,
  "[h7]": 78,
  "[h8]": 79,
  "[x]": 80,
  "[+]": 81,
  "[#]": 82,
  "[O-O]": 83,
  "[O-O-O]": 84,
  "[=q]": 85,
  "[=r]": 86,
  "[=b]": 87,
  "[=n]": 88
}
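A final consistency check (a sketch): the file defines exactly 89 entries, matching `vocab_size` in config.json, with ids laid out as 4 specials, 12 side+piece tokens, 64 squares, 5 flags, and 4 promotions.

```python
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

assert len(vocab) == 89      # matches "vocab_size" in config.json
assert vocab["[h8]"] == 79   # 4 specials + 12 pieces + 64 squares - 1
assert vocab["[=n]"] == 88   # last id
```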