Vincentime committed
Commit c41833c · verified · 1 Parent(s): 3199e7d

Chess Challenge submission by Vincentime

Files changed (7)
  1. README.md +26 -0
  2. config.json +20 -0
  3. model.safetensors +3 -0
  4. special_tokens_map.json +6 -0
  5. tokenizer.py +124 -0
  6. tokenizer_config.json +50 -0
  7. vocab.json +74 -0
README.md ADDED
@@ -0,0 +1,26 @@
+ ---
+ library_name: transformers
+ tags:
+ - chess
+ - llm-course
+ - chess-challenge
+ license: mit
+ ---
+
+ # chess-vincentime-rook
+
+ Chess model submitted to the LLM Course Chess Challenge.
+
+ ## Submission Info
+
+ - **Submitted by**: [Vincentime](https://huggingface.co/Vincentime)
+ - **Parameters**: 999,032
+ - **Organization**: LLM-course
+
+ ## Model Details
+
+ - **Architecture**: Chess Transformer (GPT-style)
+ - **Vocab size**: 72
+ - **Embedding dim**: 112
+ - **Layers**: 8
+ - **Heads**: 8
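Because `tokenizer_config.json` (further down in this commit) maps `AutoTokenizer` to `tokenizer.ChessTokenizer` via `auto_map`, the tokenizer can be pulled from the Hub with `trust_remote_code=True`. A minimal sketch, assuming the repo id is `LLM-course/chess-vincentime-rook` (assembled from the organization and model name above); note that `ChessForCausalLM` is a custom architecture, so loading the weights requires the challenge's own modeling code rather than a stock `transformers` class.

```python
# Sketch: load the custom square-level tokenizer from the Hub.
# Assumption: the repo id is "LLM-course/chess-vincentime-rook".
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "LLM-course/chess-vincentime-rook",
    trust_remote_code=True,  # required: tokenizer_config.json uses auto_map -> tokenizer.ChessTokenizer
)

print(tokenizer.vocab_size)             # 72
print(tokenizer.tokenize("e2e4 e7e5"))  # ['e2', 'e4', 'e7', 'e5']
```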
config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "architectures": [
+     "ChessForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "chess_transformer",
+   "n_ctx": 256,
+   "n_embd": 112,
+   "n_head": 8,
+   "n_inner": 307,
+   "n_layer": 8,
+   "pad_token_id": 0,
+   "tie_weights": true,
+   "transformers_version": "4.57.5",
+   "vocab_size": 72
+ }
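The 999,032 parameter figure from the README can be sanity-checked against these hyperparameters. A sketch, assuming a GPT-2-style layout with learned positional embeddings over `n_ctx` positions, biased linear layers, two LayerNorms per block plus a final one, and an LM head tied to the token embedding (`tie_weights: true`); the exact module layout of `ChessForCausalLM` is defined by the challenge's modeling code, but this layout reproduces the README figure exactly.

```python
# Sketch: parameter count for a GPT-2-style decoder using the config.json values above.
vocab_size, n_embd, n_ctx, n_layer, n_inner = 72, 112, 256, 8, 307

embeddings = vocab_size * n_embd + n_ctx * n_embd        # token + position tables
attn = (n_embd * 3 * n_embd + 3 * n_embd) \
     + (n_embd * n_embd + n_embd)                        # qkv + output projection (with biases)
mlp = (n_embd * n_inner + n_inner) \
    + (n_inner * n_embd + n_embd)                        # up + down projection (with biases)
layernorms = 2 * 2 * n_embd                              # ln_1 and ln_2 (weight + bias)
per_layer = attn + mlp + layernorms

total = embeddings + n_layer * per_layer + 2 * n_embd    # plus the final LayerNorm
print(total)  # 999032, matching the README figure (tied LM head adds nothing)
```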
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3b22523f2f9331a77eba1642a1d84cc8d860b799bc5459245f388d6b6d009d3
+ size 4004616
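The weights are stored as a Git LFS pointer; the 4,004,616-byte payload is consistent with roughly 999k float32 parameters plus the safetensors header. A small sketch for checking a downloaded copy against the pointer, assuming `model.safetensors` is in the current directory:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

EXPECTED_OID = "e3b22523f2f9331a77eba1642a1d84cc8d860b799bc5459245f388d6b6d009d3"
EXPECTED_SIZE = 4004616

with open("model.safetensors", "rb") as f:
    data = f.read()

assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```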
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "[BOS]",
+   "eos_token": "[EOS]",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
tokenizer.py ADDED
@@ -0,0 +1,124 @@
+ from __future__ import annotations
+ import json
+ import os
+ import re
+ from typing import Dict, List, Optional
+ from transformers import PreTrainedTokenizer
+
+ class ChessTokenizer(PreTrainedTokenizer):
+     """
+     Deterministic square-level tokenizer.
+     Compatible with the Chess Challenge project's train/data scripts.
+     """
+     model_input_names = ["input_ids", "attention_mask"]
+     vocab_files_names = {"vocab_file": "vocab.json"}
+
+     # Special tokens identical to the original project
+     PAD_TOKEN = "[PAD]"
+     BOS_TOKEN = "[BOS]"
+     EOS_TOKEN = "[EOS]"
+     UNK_TOKEN = "[UNK]"
+
+     def __init__(self, vocab_file: Optional[str] = None, vocab: Optional[Dict[str, int]] = None, **kwargs):
+         self._pad_token = self.PAD_TOKEN
+         self._bos_token = self.BOS_TOKEN
+         self._eos_token = self.EOS_TOKEN
+         self._unk_token = self.UNK_TOKEN
+
+         # Clean up kwargs to avoid duplicate special-token arguments in the parent init
+         kwargs.pop("pad_token", None)
+         kwargs.pop("bos_token", None)
+         kwargs.pop("eos_token", None)
+         kwargs.pop("unk_token", None)
+
+         if vocab is not None:
+             self._vocab = vocab
+         elif vocab_file is not None and os.path.exists(vocab_file):
+             with open(vocab_file, "r", encoding="utf-8") as f:
+                 self._vocab = json.load(f)
+         else:
+             self._vocab = self._create_square_vocab()
+
+         self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
+
+         super().__init__(
+             pad_token=self._pad_token,
+             bos_token=self._bos_token,
+             eos_token=self._eos_token,
+             unk_token=self._unk_token,
+             **kwargs
+         )
+
+     @classmethod
+     def build_vocab_from_dataset(
+         cls,
+         dataset_name: str = "",
+         split: str = "",
+         column: str = "",
+         min_frequency: int = 0,
+         max_samples: Optional[int] = None,
+     ) -> "ChessTokenizer":
+         """
+         Compatibility method.
+         The square tokenizer uses a fixed vocabulary,
+         so the arguments are ignored and a standard instance is returned.
+         """
+         print("Square Tokenizer: Using fixed deterministic vocabulary.")
+         return cls()
+
+     def _create_square_vocab(self) -> Dict[str, int]:
+         """Build the fixed vocabulary: squares (64) + promotions (4) + special tokens (4)."""
+         special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
+         files = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
+         ranks = ['1', '2', '3', '4', '5', '6', '7', '8']
+         squares = [f + r for f in files for r in ranks]
+         promotions = ['q', 'r', 'b', 'n']
+
+         all_tokens = special_tokens + squares + promotions
+         return {token: idx for idx, token in enumerate(all_tokens)}
+
+     # --- METHODS REQUIRED FOR HUGGING FACE COMPATIBILITY ---
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self._vocab)
+
+     def get_vocab(self) -> Dict[str, int]:
+         return dict(self._vocab)
+
+     def _tokenize(self, text: str) -> List[str]:
+         """Split 'WPe2e4' into ['e2', 'e4']."""
+         moves = text.strip().split()
+         tokens = []
+         for m in moves:
+             if m in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
+                 tokens.append(m)
+                 continue
+
+             # Regex cleanup: keep only the a-h / 1-8 coordinates and the q/r/b/n promotion letters
+             clean_m = re.sub(r'[\(\)x\+\*WBPNBRQK]', '', m)
+
+             if len(clean_m) >= 4:
+                 tokens.append(clean_m[0:2])  # origin square
+                 tokens.append(clean_m[2:4])  # destination square
+                 if len(clean_m) == 5:
+                     tokens.append(clean_m[4])  # promotion piece
+         return tokens
+
+     def _convert_token_to_id(self, token: str) -> int:
+         return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))
+
+     def _convert_id_to_token(self, index: int) -> str:
+         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         # Useful for rebuilding the text format if needed
+         return "".join(tokens)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+         vocab_file = os.path.join(save_directory, (f"{filename_prefix}-" if filename_prefix else "") + "vocab.json")
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             json.dump(self._vocab, f, ensure_ascii=False, indent=2)
+         return (vocab_file,)
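A quick local usage sketch of the class above, assuming `tokenizer.py` is importable from the working directory; the IDs shown follow `vocab.json` below.

```python
# Sketch: exercise ChessTokenizer locally (tokenizer.py in the working directory).
from tokenizer import ChessTokenizer

tok = ChessTokenizer()                       # falls back to the fixed square vocabulary
print(tok.vocab_size)                        # 72

tokens = tok.tokenize("e2e4 e7e5 e7e8q")
print(tokens)                                # ['e2', 'e4', 'e7', 'e5', 'e7', 'e8', 'q']
print(tok.convert_tokens_to_ids(tokens))     # [37, 39, 42, 40, 42, 43, 68]

# Round-trip back to the compact text form
print(tok.convert_tokens_to_string(tokens))  # e2e4e7e5e7e8q
```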
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[BOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[EOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.ChessTokenizer",
+       null
+     ]
+   },
+   "bos_token": "[BOS]",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[EOS]",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "ChessTokenizer",
+   "unk_token": "[UNK]"
+ }
vocab.json ADDED
@@ -0,0 +1,74 @@
+ {
+   "[PAD]": 0,
+   "[BOS]": 1,
+   "[EOS]": 2,
+   "[UNK]": 3,
+   "a1": 4,
+   "a2": 5,
+   "a3": 6,
+   "a4": 7,
+   "a5": 8,
+   "a6": 9,
+   "a7": 10,
+   "a8": 11,
+   "b1": 12,
+   "b2": 13,
+   "b3": 14,
+   "b4": 15,
+   "b5": 16,
+   "b6": 17,
+   "b7": 18,
+   "b8": 19,
+   "c1": 20,
+   "c2": 21,
+   "c3": 22,
+   "c4": 23,
+   "c5": 24,
+   "c6": 25,
+   "c7": 26,
+   "c8": 27,
+   "d1": 28,
+   "d2": 29,
+   "d3": 30,
+   "d4": 31,
+   "d5": 32,
+   "d6": 33,
+   "d7": 34,
+   "d8": 35,
+   "e1": 36,
+   "e2": 37,
+   "e3": 38,
+   "e4": 39,
+   "e5": 40,
+   "e6": 41,
+   "e7": 42,
+   "e8": 43,
+   "f1": 44,
+   "f2": 45,
+   "f3": 46,
+   "f4": 47,
+   "f5": 48,
+   "f6": 49,
+   "f7": 50,
+   "f8": 51,
+   "g1": 52,
+   "g2": 53,
+   "g3": 54,
+   "g4": 55,
+   "g5": 56,
+   "g6": 57,
+   "g7": 58,
+   "g8": 59,
+   "h1": 60,
+   "h2": 61,
+   "h3": 62,
+   "h4": 63,
+   "h5": 64,
+   "h6": 65,
+   "h7": 66,
+   "h8": 67,
+   "q": 68,
+   "r": 69,
+   "b": 70,
+   "n": 71
+ }
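This vocabulary is exactly what `_create_square_vocab` in `tokenizer.py` generates: 4 special tokens, the 64 squares in file-major order, then the 4 promotion pieces, for 72 entries in total. A small sketch that rebuilds it and checks a local copy of `vocab.json` against it:

```python
# Sketch: rebuild the fixed square vocabulary and compare it with vocab.json.
import json

special = ["[PAD]", "[BOS]", "[EOS]", "[UNK]"]
squares = [f + r for f in "abcdefgh" for r in "12345678"]   # file-major: a1..a8, b1..b8, ...
promotions = ["q", "r", "b", "n"]
expected = {tok: i for i, tok in enumerate(special + squares + promotions)}

with open("vocab.json", encoding="utf-8") as fh:
    on_disk = json.load(fh)

assert len(expected) == 72 and on_disk == expected
print("vocab.json matches the deterministic square vocabulary")
```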