Sunxt25 committed (verified)
Commit a17994d · 1 Parent(s): 7de47b2

Chess Challenge submission by Sunxt25

Files changed (5)
  1. README.md +2 -2
  2. config.json +1 -1
  3. model.safetensors +2 -2
  4. tokenizer_config.json +0 -6
  5. vocab.json +5 -1
README.md CHANGED
@@ -14,13 +14,13 @@ Chess model submitted to the LLM Course Chess Challenge.
 ## Submission Info
 
 - **Submitted by**: [Sunxt25](https://huggingface.co/Sunxt25)
-- **Parameters**: 921,480
+- **Parameters**: 921,960
 - **Organization**: LLM-course
 
 ## Model Details
 
 - **Architecture**: Chess Transformer (GPT-style)
-- **Vocab size**: 149
+- **Vocab size**: 153
 - **Embedding dim**: 120
 - **Layers**: 6
 - **Heads**: 8
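
The parameter bump is consistent with the vocab change: with tied input/output embeddings (config.json below sets "tie_weights": true), growing the vocabulary by 4 tokens at embedding dim 120 adds exactly 4 × 120 = 480 weights, and 921,480 + 480 = 921,960. A minimal arithmetic check (a sketch, not code from the repo):

```python
# Sanity-check the parameter delta in this commit.
d_model = 120                      # "Embedding dim" from the README
old_vocab, new_vocab = 149, 153    # "Vocab size" before/after
old_params, new_params = 921_480, 921_960

# 4 new embedding rows of width 120; tied weights mean no extra output head.
delta = (new_vocab - old_vocab) * d_model
assert new_params - old_params == delta    # 480 == 480
```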
config.json CHANGED
@@ -16,5 +16,5 @@
   "pad_token_id": 0,
   "tie_weights": true,
   "transformers_version": "4.57.5",
-  "vocab_size": 149
+  "vocab_size": 153
 }
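
The new "vocab_size" has to stay in sync with vocab.json, which this same commit updates. A quick consistency check, sketched as if run from a local clone of the repo:

```python
import json

# Both files are touched in this commit; they should agree.
with open("config.json") as f:
    cfg = json.load(f)
with open("vocab.json") as f:
    vocab = json.load(f)

assert cfg["vocab_size"] == len(vocab) == 153
```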
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85c0ec63b6d2b29f9f2c84fbb33ddbf94f846e5a31e78be226e496c4cd86d418
-size 3692360
+oid sha256:cd9517fd738f366244654ec61b5249180da1d24342abccf7170f43faa7160360
+size 3694280
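
The 1,920-byte growth of the checkpoint matches the parameter delta if the weights are stored as fp32 (an assumption; the dtype isn't shown in this diff): 480 new weights × 4 bytes = 1,920 bytes.

```python
# File-size delta vs. parameter delta (fp32 assumed, 4 bytes per weight).
old_size, new_size = 3_692_360, 3_694_280
new_params = 480                   # from the README diff above
bytes_per_param = 4                # fp32 assumption
assert new_size - old_size == new_params * bytes_per_param  # 1,920 bytes
```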
tokenizer_config.json CHANGED
@@ -33,12 +33,6 @@
       "special": true
     }
   },
-  "auto_map": {
-    "AutoTokenizer": [
-      "tokenizer.ChessTokenizer",
-      null
-    ]
-  },
   "bos_token": "[BOS]",
   "clean_up_tokenization_spaces": false,
   "eos_token": "[EOS]",
vocab.json CHANGED
@@ -147,5 +147,9 @@
   "(+)": 145,
   "(+*)": 146,
   "(o)": 147,
-  "(O)": 148
+  "(O)": 148,
+  "q": 149,
+  "r": 150,
+  "b": 151,
+  "n": 152
 }
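
The four new tokens look like the lowercase promotion-piece letters from UCI move notation (e.g. e7e8q); that reading is an inference from the letters q/r/b/n, not something the commit states. A quick integrity check over the updated vocab:

```python
import json

with open("vocab.json") as f:
    vocab = json.load(f)

# Ids must stay contiguous for the (tied) embedding table to index cleanly.
assert sorted(vocab.values()) == list(range(153))
# The four additions; lowercase q/r/b/n are UCI promotion letters.
assert [vocab[p] for p in "qrbn"] == [149, 150, 151, 152]
```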