Chess Challenge submission by stephecw
Files changed:
- README.md +2 -2
- config.json +2 -2
- model.safetensors +2 -2
- src/tokenizer.py +84 -18
- tokenizer.py +84 -18
- vocab.json +56 -56
README.md
CHANGED
@@ -14,7 +14,7 @@ Chess model submitted to the LLM Course Chess Challenge.
 ## Submission Info
 
 - **Submitted by**: [stephecw](https://huggingface.co/stephecw)
-- **Parameters**:
+- **Parameters**: 997,488
 - **Organization**: LLM-course
 
 ## Model Details
@@ -23,4 +23,4 @@ Chess model submitted to the LLM Course Chess Challenge.
 - **Vocab size**: 72
 - **Embedding dim**: 128
 - **Layers**: 6
-- **Heads**:
+- **Heads**: 8
config.json
CHANGED
@@ -10,8 +10,8 @@
   "model_type": "chess_transformer",
   "n_ctx": 256,
   "n_embd": 128,
-  "n_head":
-  "n_inner":
+  "n_head": 8,
+  "n_inner": 360,
   "n_layer": 6,
   "pad_token_id": 0,
   "tie_weights": true,
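For reference, the filled-in head count and inner dimension are consistent with the 997,488 parameters now listed in the README. A minimal sanity-check sketch, assuming a standard GPT-2-style decoder (learned positional embeddings over `n_ctx`, biased linear layers, two LayerNorms per block plus a final one, and a tied output head as `"tie_weights": true` suggests):

```python
# Hypothetical parameter count implied by config.json (assumes a GPT-2-style block layout;
# the head count does not change the total, only how the attention matrices are split).
vocab_size, n_ctx, n_embd, n_inner, n_layer = 72, 256, 128, 360, 6

embeddings = vocab_size * n_embd + n_ctx * n_embd                        # token + position tables
attn = (3 * n_embd * n_embd + 3 * n_embd) + (n_embd * n_embd + n_embd)   # QKV + output projection
mlp = (n_embd * n_inner + n_inner) + (n_inner * n_embd + n_embd)         # up + down projection
norms = 2 * 2 * n_embd                                                   # two LayerNorms per block
per_block = attn + mlp + norms

total = embeddings + n_layer * per_block + 2 * n_embd                    # + final LayerNorm; tied head adds nothing
print(total)  # 997488 — matches the README's parameter count
```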
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a686c5ae98a7ec29e009b43931e1e936feabe771ce50cc2ed5bb5db36196e10e
+size 3996400
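The new pointer size is also in the right range for that parameter count; a rough check, assuming float32 weights plus a small safetensors header:

```python
# 997,488 params x 4 bytes (fp32) is just under the 3,996,400-byte file,
# leaving a few KB for the safetensors header/metadata.
print(997_488 * 4)  # 3989952
```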
src/tokenizer.py
CHANGED
@@ -10,14 +10,23 @@ The dataset format uses:
 - Source and destination squares (e.g., e2e4)
 - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
 """
+
 from __future__ import annotations
 
 import json
 import os
+import re
 from typing import Dict, List, Optional
 
 from transformers import PreTrainedTokenizer
 
+SQUARE_RE = re.compile(r"[a-h][1-8]")
+UCI_PROMO_RE = re.compile(r"^[a-h][1-8][a-h][1-8]([qrbn])$", re.IGNORECASE)
+EQ_PROMO_RE = re.compile(r"=([QRBNqrbn])")
+PAREN_PROMO_RE = re.compile(r"\(([QRBNqrbn])\)")
+
+PROMOS = {"q", "r", "b", "n"}
+
 
 class ChessTokenizer(PreTrainedTokenizer):
     vocab_files_names = {"vocab_file": "vocab.json"}
@@ -34,19 +43,16 @@ class ChessTokenizer(PreTrainedTokenizer):
         vocab: Optional[Dict[str, int]] = None,
         **kwargs,
     ):
-        # Define special tokens
         self._pad_token = self.PAD_TOKEN
         self._bos_token = self.BOS_TOKEN
         self._eos_token = self.EOS_TOKEN
         self._unk_token = self.UNK_TOKEN
 
-        # Avoid duplicates when loading from disk
         kwargs.pop("pad_token", None)
         kwargs.pop("bos_token", None)
         kwargs.pop("eos_token", None)
         kwargs.pop("unk_token", None)
 
-        # Load vocab or create fixed vocab
         if vocab is not None:
             self._vocab = vocab
         elif vocab_file is not None and os.path.exists(vocab_file):
@@ -67,13 +73,9 @@ class ChessTokenizer(PreTrainedTokenizer):
 
     def _create_fixed_vocab(self) -> Dict[str, int]:
         specials = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
-
-
-        squares = [f"{file}{rank}" for rank in "12345678" for file in "abcdefgh"]
-
-        # Optional promotion tokens (evaluator can detect q/r/b/n after the 2nd square)
+        # IMPORTANT: deterministic ids matching a1,a2,...,a8,b1,... style
+        squares = [f"{f}{r}" for f in "abcdefgh" for r in "12345678"]
         promos = ["q", "r", "b", "n"]
-
         tokens = specials + squares + promos
         return {tok: i for i, tok in enumerate(tokens)}
 
@@ -84,8 +86,56 @@ class ChessTokenizer(PreTrainedTokenizer):
     def get_vocab(self) -> Dict[str, int]:
         return dict(self._vocab)
 
+    def _extract_promo_anywhere(self, mv: str) -> Optional[str]:
+        m = EQ_PROMO_RE.search(mv)
+        if m:
+            return m.group(1).lower()
+        m = PAREN_PROMO_RE.search(mv)
+        if m:
+            return m.group(1).lower()
+        m = UCI_PROMO_RE.match(mv)
+        if m:
+            return m.group(1).lower()
+        return None
+
     def _tokenize(self, text: str) -> List[str]:
-
+        """
+        Robust tokenization:
+        - keeps special tokens ([BOS], etc.) as-is (HF handles them)
+        - accepts already-split squares: "e2 e4"
+        - accepts uci concat: "e2e4" -> e2,e4 (+promo)
+        - accepts verbose tokens containing squares: "WPe2e4(x+)" -> e2,e4 (+promo)
+        """
+        tokens: List[str] = []
+
+        for chunk in text.strip().split():
+            # already-split square?
+            if re.fullmatch(r"[a-h][1-8]", chunk):
+                tokens.append(chunk)
+                continue
+
+            # promo alone?
+            if chunk in PROMOS:
+                tokens.append(chunk)
+                continue
+
+            # otherwise: extract squares from inside
+            squares = SQUARE_RE.findall(chunk)
+            if len(squares) >= 2:
+                tokens.append(squares[0])
+                tokens.append(squares[1])
+
+                promo = self._extract_promo_anywhere(chunk)
+                if promo in PROMOS:
+                    tokens.append(promo)
+            else:
+                # allow special tokens to pass through if present
+                if chunk in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
+                    tokens.append(chunk)
+                else:
+                    tokens.append(self.UNK_TOKEN)
+
+        return tokens
 
     def _convert_token_to_id(self, token: str) -> int:
         return self._vocab.get(token, self._vocab[self.UNK_TOKEN])
@@ -94,14 +144,29 @@ class ChessTokenizer(PreTrainedTokenizer):
         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
 
     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+        """
+        Reconstruct "e2e4 e7e8q ..."
+        """
         special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
-
-
-
-
-
-
-
+        clean = [t for t in tokens if t not in special]
+
+        moves: List[str] = []
+        i = 0
+        while i < len(clean):
+            if re.fullmatch(r"[a-h][1-8]", clean[i]) and i + 1 < len(clean) and re.fullmatch(r"[a-h][1-8]", clean[i + 1]):
+                mv = clean[i] + clean[i + 1]
+                i += 2
+                if i < len(clean) and clean[i] in PROMOS:
+                    mv += clean[i]
+                    i += 1
+                moves.append(mv)
+            else:
+                moves.append(clean[i])
+                i += 1
+
+        return " ".join(moves)
+
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
         os.makedirs(save_directory, exist_ok=True)
         vocab_file = os.path.join(
             save_directory,
@@ -110,6 +175,7 @@ class ChessTokenizer(PreTrainedTokenizer):
         with open(vocab_file, "w", encoding="utf-8") as f:
             json.dump(self._vocab, f, ensure_ascii=False, indent=2)
         return (vocab_file,)
-
+
+
 from transformers import AutoTokenizer
 ChessTokenizer.register_for_auto_class("AutoTokenizer")
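A quick illustration of what the new `_tokenize` / `convert_tokens_to_string` pair does with the input styles named in the docstring. This is a sketch, assuming the class is importable as below and that the constructor falls back to `_create_fixed_vocab()` when no vocab is supplied, as the code suggests:

```python
from tokenizer import ChessTokenizer  # hypothetical import path; src/tokenizer.py holds the same class

tok = ChessTokenizer()  # assumed to fall back to the fixed 72-token vocab

# All three accepted input styles reduce to square (and promotion) tokens:
print(tok._tokenize("e2 e4"))       # ['e2', 'e4']       already-split squares
print(tok._tokenize("e7e8q"))       # ['e7', 'e8', 'q']  UCI concat with promotion
print(tok._tokenize("WPe2e4(x+)"))  # ['e2', 'e4']       verbose dataset token

# Decoding glues square pairs (plus a trailing promotion) back into UCI-style moves:
print(tok.convert_tokens_to_string(["e2", "e4", "e7", "e8", "q"]))  # e2e4 e7e8q
```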
tokenizer.py
CHANGED
(Same diff as src/tokenizer.py above; the two copies of the tokenizer receive identical changes.)
vocab.json
CHANGED
@@ -4,68 +4,68 @@
   "[EOS]": 2,
   "[UNK]": 3,
   "a1": 4,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "a2": 5,
+  "a3": 6,
+  "a4": 7,
+  "a5": 8,
+  "a6": 9,
+  "a7": 10,
+  "a8": 11,
+  "b1": 12,
   "b2": 13,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "b3": 14,
+  "b4": 15,
+  "b5": 16,
+  "b6": 17,
+  "b7": 18,
+  "b8": 19,
+  "c1": 20,
+  "c2": 21,
   "c3": 22,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "c4": 23,
+  "c5": 24,
+  "c6": 25,
+  "c7": 26,
+  "c8": 27,
+  "d1": 28,
+  "d2": 29,
+  "d3": 30,
   "d4": 31,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "d5": 32,
+  "d6": 33,
+  "d7": 34,
+  "d8": 35,
+  "e1": 36,
+  "e2": 37,
+  "e3": 38,
+  "e4": 39,
   "e5": 40,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "e6": 41,
+  "e7": 42,
+  "e8": 43,
+  "f1": 44,
+  "f2": 45,
+  "f3": 46,
+  "f4": 47,
+  "f5": 48,
   "f6": 49,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "f7": 50,
+  "f8": 51,
+  "g1": 52,
+  "g2": 53,
+  "g3": 54,
+  "g4": 55,
+  "g5": 56,
+  "g6": 57,
   "g7": 58,
-  "
-  "
-  "
-  "
-  "
-  "
-  "
-  "
+  "g8": 59,
+  "h1": 60,
+  "h2": 61,
+  "h3": 62,
+  "h4": 63,
+  "h5": 64,
+  "h6": 65,
+  "h7": 66,
   "h8": 67,
   "q": 68,
   "r": 69,
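The new ids follow the file-major ordering produced by the updated `_create_fixed_vocab`: 4 special tokens, 64 squares, then the promotion letters (with `b` and `n` presumably at 70 and 71, completing the 72-token vocabulary noted in the README). A small sketch that regenerates the same mapping, assuming the special tokens are the literal strings `[PAD]`, `[BOS]`, `[EOS]`, `[UNK]`:

```python
# Regenerate the square ids used in the new vocab.json (file-major ordering).
specials = ["[PAD]", "[BOS]", "[EOS]", "[UNK]"]                # assumed literals; "[EOS]": 2, "[UNK]": 3 are shown above
squares = [f"{f}{r}" for f in "abcdefgh" for r in "12345678"]  # a1, a2, ..., a8, b1, ...
promos = ["q", "r", "b", "n"]

vocab = {tok: i for i, tok in enumerate(specials + squares + promos)}
print(vocab["a2"], vocab["b2"], vocab["h8"], vocab["q"])  # 5 13 67 68 — matches the entries above
print(len(vocab))                                         # 72
```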