Tome1 committed on
Commit
49bf32e
·
verified ·
1 Parent(s): 038fa12

Chess Challenge submission by Tome1

Browse files
Files changed (7) hide show
  1. README.md +26 -0
  2. config.json +20 -0
  3. model.safetensors +3 -0
  4. special_tokens_map.json +6 -0
  5. tokenizer.py +192 -0
  6. tokenizer_config.json +44 -0
  7. vocab.json +545 -0
README.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ tags:
4
+ - chess
5
+ - llm-course
6
+ - chess-challenge
7
+ license: mit
8
+ ---
9
+
10
+ # chess-tomin-v3
11
+
12
+ Chess model submitted to the LLM Course Chess Challenge.
13
+
14
+ ## Submission Info
15
+
16
+ - **Submitted by**: [Tome1](https://huggingface.co/Tome1)
17
+ - **Parameters**: 631,424
18
+ - **Organization**: LLM-course
19
+
20
+ ## Model Details
21
+
22
+ - **Architecture**: Chess Transformer (GPT-style)
23
+ - **Vocab size**: 543
24
+ - **Embedding dim**: 128
25
+ - **Layers**: 3
26
+ - **Heads**: 4
config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ChessForCausalLM"
4
+ ],
5
+ "bos_token_id": 1,
6
+ "dropout": 0.1,
7
+ "dtype": "float32",
8
+ "eos_token_id": 2,
9
+ "layer_norm_epsilon": 1e-05,
10
+ "model_type": "chess_transformer",
11
+ "n_ctx": 512,
12
+ "n_embd": 128,
13
+ "n_head": 4,
14
+ "n_inner": 384,
15
+ "n_layer": 3,
16
+ "pad_token_id": 0,
17
+ "tie_weights": true,
18
+ "transformers_version": "4.57.4",
19
+ "vocab_size": 543
20
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77617e4a08e4bf111c0745f35b287438d63accd689a351cd695db4099926cca1
3
+ size 2529072
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "[BOS]",
3
+ "eos_token": "[EOS]",
4
+ "pad_token": "[PAD]",
5
+ "unk_token": "[UNK]"
6
+ }
tokenizer.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Custom Chess Tokenizer for the Chess Challenge.
3
+
4
+ This tokenizer treats each move as a single token using the extended UCI notation
5
+ from the Lichess dataset (e.g., WPe2e4, BNg8f6).
6
+
7
+ The dataset format uses:
8
+ - W/B prefix for White/Black
9
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
10
+ - Source and destination squares (e.g., e2e4)
11
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import os
18
+ from pathlib import Path
19
+ from typing import Dict, List, Optional
20
+
21
+
22
+ from transformers import PreTrainedTokenizer
23
+
24
+
25
+
26
+
27
+
28
+ """
29
+ Custom Chess Tokenizer (Split Move Version).
30
+
31
+ Splits moves into 3 components:
32
+ 1. Piece (e.g., "WP")
33
+ 2. From Square (e.g., "e2")
34
+ 3. To Square (e.g., "e4")
35
+ """
36
+
37
+
38
+
39
class ChessTokenizer(PreTrainedTokenizer):
    """Move-level tokenizer that splits each extended-UCI chess move into
    three tokens: piece ("WP"), from-square ("e2"), and to-square with any
    suffix ("e4", "e4(x+)", ...).

    Example: "WPe2e4 BNg8f6" -> ["WP", "e2", "e4", "BN", "g8", "f6"]
    """

    model_input_names = ["input_ids", "attention_mask"]

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"  # Beginning of Sequence (Start of Game)
    EOS_TOKEN = "[EOS]"  # End of Sequence (End of Game)
    UNK_TOKEN = "[UNK]"

    vocab_files_names = {
        "vocab_file": "vocab.json"
    }

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """Build the tokenizer from an explicit ``vocab`` mapping, a JSON
        ``vocab_file`` (token -> id), or a minimal default vocabulary that
        contains only the four special tokens.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop caller-supplied special-token kwargs so they cannot conflict
        # with the fixed tokens defined on this class.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        self.vocab_file = vocab_file

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # NOTE: the vocab must exist before calling the base constructor —
        # PreTrainedTokenizer.__init__ registers the special tokens and
        # queries the vocabulary while doing so.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """Create a minimal vocab holding only the 4 special tokens.

        Use :meth:`build_vocab_from_dataset` to build the full vocabulary
        (pieces + squares) from a dataset.
        """
        special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {t: i for i, t in enumerate(special)}

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    @staticmethod
    def _is_piece_token(token: str) -> bool:
        """True for color+piece tokens like "WP" or "BN" (start of a move).

        Square tokens ("e2", "a1(x)") start with a lowercase file letter and
        therefore never match.
        """
        return len(token) == 2 and token[0] in "WB" and token[1] in "PNBRQK"

    def _tokenize(self, text: str) -> List[str]:
        """Split "WPe2e4 BNg8f6" into ["WP", "e2", "e4", "BN", "g8", "f6"].

        A move shorter than 6 characters cannot contain a piece plus two
        squares and is mapped to a single [UNK] token.
        """
        tokens: List[str] = []
        for move in text.strip().split():
            if len(move) >= 6:
                tokens.append(move[:2])   # color + piece, e.g. "WP"
                tokens.append(move[2:4])  # from-square, e.g. "e2"
                tokens.append(move[4:])   # to-square (+ suffix), e.g. "e4(x)"
            else:
                tokens.append(self.UNK_TOKEN)
        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Reconstruct the original move string from split tokens.

        Regroups the piece/from/to triples produced by :meth:`_tokenize`
        back into whole moves so that encode -> decode round-trips:
        ["WP", "e2", "e4"] -> "WPe2e4". Special tokens are dropped.
        """
        specials = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        moves: List[str] = []
        current: List[str] = []
        for token in tokens:
            if token in specials:
                continue
            # A piece token marks the start of a new move: flush the previous one.
            if self._is_piece_token(token) and current:
                moves.append("".join(current))
                current = []
            current.append(token)
        if current:
            moves.append("".join(current))
        return " ".join(moves)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        """Write the vocabulary to ``vocab.json`` under ``save_directory``
        and return a 1-tuple with the written file path.
        """
        # exist_ok makes a prior isdir check redundant.
        os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json"
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)

    @classmethod
    def build_vocab_from_dataset(cls, dataset_name="dlouapre/lichess_2025-01_1M", split="train", max_samples=10000):
        """Scan a streaming dataset to collect all piece and square tokens.

        Ids are assigned deterministically: the 4 specials first, then the
        sorted piece tokens, then the sorted square tokens.
        """
        from datasets import load_dataset
        dataset = load_dataset(dataset_name, split=split, streaming=True)

        pieces = set()
        squares = set()

        print("Building vocabulary...")
        count = 0
        for example in dataset:
            for move in example["text"].split():
                # Same length guard as _tokenize: skip malformed entries.
                if len(move) >= 6:
                    pieces.add(move[:2])    # WP, BN, etc.
                    squares.add(move[2:4])  # from-square
                    squares.add(move[4:])   # to-square (+ suffix)

            count += 1
            if count >= max_samples:
                break

        # Combine into the final vocab structure.
        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        all_tokens = special + sorted(pieces) + sorted(squares)

        return cls(vocab={token: idx for idx, token in enumerate(all_tokens)})
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[BOS]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[EOS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[UNK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ }
35
+ },
36
+ "bos_token": "[BOS]",
37
+ "clean_up_tokenization_spaces": false,
38
+ "eos_token": "[EOS]",
39
+ "extra_special_tokens": {},
40
+ "model_max_length": 1000000000000000019884624838656,
41
+ "pad_token": "[PAD]",
42
+ "tokenizer_class": "ChessTokenizer",
43
+ "unk_token": "[UNK]"
44
+ }
vocab.json ADDED
@@ -0,0 +1,545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "[PAD]": 0,
3
+ "[BOS]": 1,
4
+ "[EOS]": 2,
5
+ "[UNK]": 3,
6
+ "BB": 4,
7
+ "BK": 5,
8
+ "BN": 6,
9
+ "BP": 7,
10
+ "BQ": 8,
11
+ "BR": 9,
12
+ "WB": 10,
13
+ "WK": 11,
14
+ "WN": 12,
15
+ "WP": 13,
16
+ "WQ": 14,
17
+ "WR": 15,
18
+ "a1": 16,
19
+ "a1(+)": 17,
20
+ "a1(+*)": 18,
21
+ "a1(+*Q)": 19,
22
+ "a1(+*R)": 20,
23
+ "a1(+N)": 21,
24
+ "a1(+Q)": 22,
25
+ "a1(B)": 23,
26
+ "a1(N)": 24,
27
+ "a1(Q)": 25,
28
+ "a1(x)": 26,
29
+ "a1(x+)": 27,
30
+ "a1(x+Q)": 28,
31
+ "a1(xQ)": 29,
32
+ "a2": 30,
33
+ "a2(+)": 31,
34
+ "a2(+*)": 32,
35
+ "a2(x)": 33,
36
+ "a2(x+)": 34,
37
+ "a2(x+*)": 35,
38
+ "a3": 36,
39
+ "a3(+)": 37,
40
+ "a3(+*)": 38,
41
+ "a3(x)": 39,
42
+ "a3(x+)": 40,
43
+ "a3(x+*)": 41,
44
+ "a3(xE)": 42,
45
+ "a4": 43,
46
+ "a4(+)": 44,
47
+ "a4(+*)": 45,
48
+ "a4(x)": 46,
49
+ "a4(x+)": 47,
50
+ "a4(x+*)": 48,
51
+ "a5": 49,
52
+ "a5(+)": 50,
53
+ "a5(+*)": 51,
54
+ "a5(x)": 52,
55
+ "a5(x+)": 53,
56
+ "a5(x+*)": 54,
57
+ "a6": 55,
58
+ "a6(+)": 56,
59
+ "a6(+*)": 57,
60
+ "a6(x)": 58,
61
+ "a6(x+)": 59,
62
+ "a6(x+*)": 60,
63
+ "a6(xE)": 61,
64
+ "a6(xE+)": 62,
65
+ "a7": 63,
66
+ "a7(+)": 64,
67
+ "a7(+*)": 65,
68
+ "a7(x)": 66,
69
+ "a7(x+)": 67,
70
+ "a7(x+*)": 68,
71
+ "a8": 69,
72
+ "a8(+)": 70,
73
+ "a8(+*)": 71,
74
+ "a8(+*Q)": 72,
75
+ "a8(+Q)": 73,
76
+ "a8(Q)": 74,
77
+ "a8(R)": 75,
78
+ "a8(x)": 76,
79
+ "a8(x+)": 77,
80
+ "a8(x+*)": 78,
81
+ "a8(xQ)": 79,
82
+ "b1": 80,
83
+ "b1(+)": 81,
84
+ "b1(+*)": 82,
85
+ "b1(+*Q)": 83,
86
+ "b1(+N)": 84,
87
+ "b1(+Q)": 85,
88
+ "b1(N)": 86,
89
+ "b1(Q)": 87,
90
+ "b1(R)": 88,
91
+ "b1(x)": 89,
92
+ "b1(x+)": 90,
93
+ "b1(x+*)": 91,
94
+ "b1(x+Q)": 92,
95
+ "b1(xQ)": 93,
96
+ "b2": 94,
97
+ "b2(+)": 95,
98
+ "b2(+*)": 96,
99
+ "b2(x)": 97,
100
+ "b2(x+)": 98,
101
+ "b2(x+*)": 99,
102
+ "b3": 100,
103
+ "b3(+)": 101,
104
+ "b3(+*)": 102,
105
+ "b3(x)": 103,
106
+ "b3(x+)": 104,
107
+ "b3(x+*)": 105,
108
+ "b3(xE)": 106,
109
+ "b3(xE+)": 107,
110
+ "b4": 108,
111
+ "b4(+)": 109,
112
+ "b4(+*)": 110,
113
+ "b4(x)": 111,
114
+ "b4(x+)": 112,
115
+ "b4(x+*)": 113,
116
+ "b5": 114,
117
+ "b5(+)": 115,
118
+ "b5(+*)": 116,
119
+ "b5(x)": 117,
120
+ "b5(x+)": 118,
121
+ "b5(x+*)": 119,
122
+ "b6": 120,
123
+ "b6(+)": 121,
124
+ "b6(+*)": 122,
125
+ "b6(x)": 123,
126
+ "b6(x+)": 124,
127
+ "b6(x+*)": 125,
128
+ "b6(xE)": 126,
129
+ "b7": 127,
130
+ "b7(+)": 128,
131
+ "b7(+*)": 129,
132
+ "b7(x)": 130,
133
+ "b7(x+)": 131,
134
+ "b7(x+*)": 132,
135
+ "b8": 133,
136
+ "b8(+)": 134,
137
+ "b8(+*)": 135,
138
+ "b8(+*Q)": 136,
139
+ "b8(+Q)": 137,
140
+ "b8(Q)": 138,
141
+ "b8(R)": 139,
142
+ "b8(x)": 140,
143
+ "b8(x+)": 141,
144
+ "b8(x+*)": 142,
145
+ "b8(x+Q)": 143,
146
+ "b8(xQ)": 144,
147
+ "c1": 145,
148
+ "c1(+)": 146,
149
+ "c1(+*)": 147,
150
+ "c1(+*Q)": 148,
151
+ "c1(+N)": 149,
152
+ "c1(+Q)": 150,
153
+ "c1(+R)": 151,
154
+ "c1(O)": 152,
155
+ "c1(O+)": 153,
156
+ "c1(O+*)": 154,
157
+ "c1(Q)": 155,
158
+ "c1(x)": 156,
159
+ "c1(x+)": 157,
160
+ "c1(x+*)": 158,
161
+ "c1(x+*Q)": 159,
162
+ "c1(x+Q)": 160,
163
+ "c1(xN)": 161,
164
+ "c1(xQ)": 162,
165
+ "c2": 163,
166
+ "c2(+)": 164,
167
+ "c2(+*)": 165,
168
+ "c2(x)": 166,
169
+ "c2(x+)": 167,
170
+ "c2(x+*)": 168,
171
+ "c3": 169,
172
+ "c3(+)": 170,
173
+ "c3(+*)": 171,
174
+ "c3(x)": 172,
175
+ "c3(x+)": 173,
176
+ "c3(x+*)": 174,
177
+ "c3(xE)": 175,
178
+ "c3(xE+)": 176,
179
+ "c4": 177,
180
+ "c4(+)": 178,
181
+ "c4(+*)": 179,
182
+ "c4(x)": 180,
183
+ "c4(x+)": 181,
184
+ "c4(x+*)": 182,
185
+ "c5": 183,
186
+ "c5(+)": 184,
187
+ "c5(+*)": 185,
188
+ "c5(x)": 186,
189
+ "c5(x+)": 187,
190
+ "c5(x+*)": 188,
191
+ "c6": 189,
192
+ "c6(+)": 190,
193
+ "c6(+*)": 191,
194
+ "c6(x)": 192,
195
+ "c6(x+)": 193,
196
+ "c6(x+*)": 194,
197
+ "c6(xE)": 195,
198
+ "c6(xE+)": 196,
199
+ "c7": 197,
200
+ "c7(+)": 198,
201
+ "c7(+*)": 199,
202
+ "c7(x)": 200,
203
+ "c7(x+)": 201,
204
+ "c7(x+*)": 202,
205
+ "c8": 203,
206
+ "c8(+)": 204,
207
+ "c8(+*)": 205,
208
+ "c8(+*Q)": 206,
209
+ "c8(+Q)": 207,
210
+ "c8(O)": 208,
211
+ "c8(O+)": 209,
212
+ "c8(Q)": 210,
213
+ "c8(x)": 211,
214
+ "c8(x+)": 212,
215
+ "c8(x+*)": 213,
216
+ "c8(x+Q)": 214,
217
+ "c8(xQ)": 215,
218
+ "d1": 216,
219
+ "d1(+)": 217,
220
+ "d1(+*)": 218,
221
+ "d1(+*Q)": 219,
222
+ "d1(+Q)": 220,
223
+ "d1(Q)": 221,
224
+ "d1(x)": 222,
225
+ "d1(x+)": 223,
226
+ "d1(x+*)": 224,
227
+ "d1(x+Q)": 225,
228
+ "d1(xQ)": 226,
229
+ "d2": 227,
230
+ "d2(+)": 228,
231
+ "d2(+*)": 229,
232
+ "d2(x)": 230,
233
+ "d2(x+)": 231,
234
+ "d2(x+*)": 232,
235
+ "d3": 233,
236
+ "d3(+)": 234,
237
+ "d3(+*)": 235,
238
+ "d3(x)": 236,
239
+ "d3(x+)": 237,
240
+ "d3(x+*)": 238,
241
+ "d3(xE)": 239,
242
+ "d4": 240,
243
+ "d4(+)": 241,
244
+ "d4(+*)": 242,
245
+ "d4(x)": 243,
246
+ "d4(x+)": 244,
247
+ "d4(x+*)": 245,
248
+ "d5": 246,
249
+ "d5(+)": 247,
250
+ "d5(+*)": 248,
251
+ "d5(x)": 249,
252
+ "d5(x+)": 250,
253
+ "d5(x+*)": 251,
254
+ "d6": 252,
255
+ "d6(+)": 253,
256
+ "d6(+*)": 254,
257
+ "d6(x)": 255,
258
+ "d6(x+)": 256,
259
+ "d6(x+*)": 257,
260
+ "d6(xE)": 258,
261
+ "d7": 259,
262
+ "d7(+)": 260,
263
+ "d7(+*)": 261,
264
+ "d7(x)": 262,
265
+ "d7(x+)": 263,
266
+ "d7(x+*)": 264,
267
+ "d8": 265,
268
+ "d8(+)": 266,
269
+ "d8(+*)": 267,
270
+ "d8(+Q)": 268,
271
+ "d8(Q)": 269,
272
+ "d8(R)": 270,
273
+ "d8(x)": 271,
274
+ "d8(x+)": 272,
275
+ "d8(x+*)": 273,
276
+ "d8(x+Q)": 274,
277
+ "d8(xQ)": 275,
278
+ "e1": 276,
279
+ "e1(+)": 277,
280
+ "e1(+*)": 278,
281
+ "e1(+*Q)": 279,
282
+ "e1(+*R)": 280,
283
+ "e1(+Q)": 281,
284
+ "e1(Q)": 282,
285
+ "e1(x)": 283,
286
+ "e1(x+)": 284,
287
+ "e1(x+*)": 285,
288
+ "e1(x+Q)": 286,
289
+ "e1(xQ)": 287,
290
+ "e2": 288,
291
+ "e2(+)": 289,
292
+ "e2(+*)": 290,
293
+ "e2(x)": 291,
294
+ "e2(x+)": 292,
295
+ "e2(x+*)": 293,
296
+ "e3": 294,
297
+ "e3(+)": 295,
298
+ "e3(+*)": 296,
299
+ "e3(x)": 297,
300
+ "e3(x+)": 298,
301
+ "e3(x+*)": 299,
302
+ "e3(xE)": 300,
303
+ "e3(xE+)": 301,
304
+ "e4": 302,
305
+ "e4(+)": 303,
306
+ "e4(+*)": 304,
307
+ "e4(x)": 305,
308
+ "e4(x+)": 306,
309
+ "e4(x+*)": 307,
310
+ "e5": 308,
311
+ "e5(+)": 309,
312
+ "e5(+*)": 310,
313
+ "e5(x)": 311,
314
+ "e5(x+)": 312,
315
+ "e5(x+*)": 313,
316
+ "e6": 314,
317
+ "e6(+)": 315,
318
+ "e6(+*)": 316,
319
+ "e6(x)": 317,
320
+ "e6(x+)": 318,
321
+ "e6(x+*)": 319,
322
+ "e6(xE)": 320,
323
+ "e6(xE+)": 321,
324
+ "e7": 322,
325
+ "e7(+)": 323,
326
+ "e7(+*)": 324,
327
+ "e7(x)": 325,
328
+ "e7(x+)": 326,
329
+ "e7(x+*)": 327,
330
+ "e8": 328,
331
+ "e8(+)": 329,
332
+ "e8(+*)": 330,
333
+ "e8(+*Q)": 331,
334
+ "e8(+N)": 332,
335
+ "e8(+Q)": 333,
336
+ "e8(Q)": 334,
337
+ "e8(R)": 335,
338
+ "e8(x)": 336,
339
+ "e8(x+)": 337,
340
+ "e8(x+*)": 338,
341
+ "e8(x+*Q)": 339,
342
+ "e8(x+Q)": 340,
343
+ "e8(x+R)": 341,
344
+ "e8(xQ)": 342,
345
+ "f1": 343,
346
+ "f1(+)": 344,
347
+ "f1(+*)": 345,
348
+ "f1(+*Q)": 346,
349
+ "f1(+*R)": 347,
350
+ "f1(+N)": 348,
351
+ "f1(+Q)": 349,
352
+ "f1(B)": 350,
353
+ "f1(Q)": 351,
354
+ "f1(R)": 352,
355
+ "f1(x)": 353,
356
+ "f1(x+)": 354,
357
+ "f1(x+*)": 355,
358
+ "f1(x+N)": 356,
359
+ "f1(x+Q)": 357,
360
+ "f1(xQ)": 358,
361
+ "f2": 359,
362
+ "f2(+)": 360,
363
+ "f2(+*)": 361,
364
+ "f2(x)": 362,
365
+ "f2(x+)": 363,
366
+ "f2(x+*)": 364,
367
+ "f3": 365,
368
+ "f3(+)": 366,
369
+ "f3(+*)": 367,
370
+ "f3(x)": 368,
371
+ "f3(x+)": 369,
372
+ "f3(x+*)": 370,
373
+ "f3(xE)": 371,
374
+ "f3(xE+)": 372,
375
+ "f4": 373,
376
+ "f4(+)": 374,
377
+ "f4(+*)": 375,
378
+ "f4(x)": 376,
379
+ "f4(x+)": 377,
380
+ "f4(x+*)": 378,
381
+ "f5": 379,
382
+ "f5(+)": 380,
383
+ "f5(+*)": 381,
384
+ "f5(x)": 382,
385
+ "f5(x+)": 383,
386
+ "f5(x+*)": 384,
387
+ "f6": 385,
388
+ "f6(+)": 386,
389
+ "f6(+*)": 387,
390
+ "f6(x)": 388,
391
+ "f6(x+)": 389,
392
+ "f6(x+*)": 390,
393
+ "f6(xE)": 391,
394
+ "f6(xE+)": 392,
395
+ "f7": 393,
396
+ "f7(+)": 394,
397
+ "f7(+*)": 395,
398
+ "f7(x)": 396,
399
+ "f7(x+)": 397,
400
+ "f7(x+*)": 398,
401
+ "f8": 399,
402
+ "f8(+)": 400,
403
+ "f8(+*)": 401,
404
+ "f8(+*Q)": 402,
405
+ "f8(+Q)": 403,
406
+ "f8(+R)": 404,
407
+ "f8(Q)": 405,
408
+ "f8(x)": 406,
409
+ "f8(x+)": 407,
410
+ "f8(x+*)": 408,
411
+ "f8(x+Q)": 409,
412
+ "f8(xN)": 410,
413
+ "f8(xQ)": 411,
414
+ "g1": 412,
415
+ "g1(+)": 413,
416
+ "g1(+*)": 414,
417
+ "g1(+*Q)": 415,
418
+ "g1(+Q)": 416,
419
+ "g1(B)": 417,
420
+ "g1(Q)": 418,
421
+ "g1(o)": 419,
422
+ "g1(o+)": 420,
423
+ "g1(o+*)": 421,
424
+ "g1(x)": 422,
425
+ "g1(x+)": 423,
426
+ "g1(x+*)": 424,
427
+ "g1(x+Q)": 425,
428
+ "g1(xQ)": 426,
429
+ "g2": 427,
430
+ "g2(+)": 428,
431
+ "g2(+*)": 429,
432
+ "g2(x)": 430,
433
+ "g2(x+)": 431,
434
+ "g2(x+*)": 432,
435
+ "g3": 433,
436
+ "g3(+)": 434,
437
+ "g3(+*)": 435,
438
+ "g3(x)": 436,
439
+ "g3(x+)": 437,
440
+ "g3(x+*)": 438,
441
+ "g3(xE)": 439,
442
+ "g3(xE+)": 440,
443
+ "g4": 441,
444
+ "g4(+)": 442,
445
+ "g4(+*)": 443,
446
+ "g4(x)": 444,
447
+ "g4(x+)": 445,
448
+ "g4(x+*)": 446,
449
+ "g5": 447,
450
+ "g5(+)": 448,
451
+ "g5(+*)": 449,
452
+ "g5(x)": 450,
453
+ "g5(x+)": 451,
454
+ "g5(x+*)": 452,
455
+ "g6": 453,
456
+ "g6(+)": 454,
457
+ "g6(+*)": 455,
458
+ "g6(x)": 456,
459
+ "g6(x+)": 457,
460
+ "g6(x+*)": 458,
461
+ "g6(xE)": 459,
462
+ "g6(xE+)": 460,
463
+ "g7": 461,
464
+ "g7(+)": 462,
465
+ "g7(+*)": 463,
466
+ "g7(x)": 464,
467
+ "g7(x+)": 465,
468
+ "g7(x+*)": 466,
469
+ "g8": 467,
470
+ "g8(+)": 468,
471
+ "g8(+*)": 469,
472
+ "g8(+N)": 470,
473
+ "g8(+Q)": 471,
474
+ "g8(N)": 472,
475
+ "g8(Q)": 473,
476
+ "g8(o)": 474,
477
+ "g8(o+)": 475,
478
+ "g8(x)": 476,
479
+ "g8(x+)": 477,
480
+ "g8(x+*)": 478,
481
+ "g8(x+Q)": 479,
482
+ "h1": 480,
483
+ "h1(+)": 481,
484
+ "h1(+*)": 482,
485
+ "h1(+*Q)": 483,
486
+ "h1(+Q)": 484,
487
+ "h1(Q)": 485,
488
+ "h1(x)": 486,
489
+ "h1(x+)": 487,
490
+ "h1(x+*)": 488,
491
+ "h1(x+Q)": 489,
492
+ "h1(xQ)": 490,
493
+ "h2": 491,
494
+ "h2(+)": 492,
495
+ "h2(+*)": 493,
496
+ "h2(x)": 494,
497
+ "h2(x+)": 495,
498
+ "h2(x+*)": 496,
499
+ "h3": 497,
500
+ "h3(+)": 498,
501
+ "h3(+*)": 499,
502
+ "h3(x)": 500,
503
+ "h3(x+)": 501,
504
+ "h3(x+*)": 502,
505
+ "h3(xE)": 503,
506
+ "h4": 504,
507
+ "h4(+)": 505,
508
+ "h4(+*)": 506,
509
+ "h4(x)": 507,
510
+ "h4(x+)": 508,
511
+ "h4(x+*)": 509,
512
+ "h5": 510,
513
+ "h5(+)": 511,
514
+ "h5(+*)": 512,
515
+ "h5(x)": 513,
516
+ "h5(x+)": 514,
517
+ "h5(x+*)": 515,
518
+ "h6": 516,
519
+ "h6(+)": 517,
520
+ "h6(+*)": 518,
521
+ "h6(x)": 519,
522
+ "h6(x+)": 520,
523
+ "h6(x+*)": 521,
524
+ "h6(xE)": 522,
525
+ "h6(xE+)": 523,
526
+ "h7": 524,
527
+ "h7(+)": 525,
528
+ "h7(+*)": 526,
529
+ "h7(x)": 527,
530
+ "h7(x+)": 528,
531
+ "h7(x+*)": 529,
532
+ "h8": 530,
533
+ "h8(+)": 531,
534
+ "h8(+*)": 532,
535
+ "h8(+*Q)": 533,
536
+ "h8(+*R)": 534,
537
+ "h8(+Q)": 535,
538
+ "h8(+R)": 536,
539
+ "h8(Q)": 537,
540
+ "h8(R)": 538,
541
+ "h8(x)": 539,
542
+ "h8(x+)": 540,
543
+ "h8(x+*)": 541,
544
+ "h8(xQ)": 542
545
+ }