Drilly93 committed on
Commit
327bdc9
·
verified ·
1 Parent(s): 08a8113

Chess Challenge submission by Drilly93

Browse files
Files changed (4) hide show
  1. README.md +2 -2
  2. config.json +3 -3
  3. model.safetensors +2 -2
  4. tokenizer_config.json +0 -3
README.md CHANGED
@@ -14,13 +14,13 @@ Chess model submitted to the LLM Course Chess Challenge.
14
  ## Submission Info
15
 
16
  - **Submitted by**: [Drilly93](https://huggingface.co/Drilly93)
17
- - **Parameters**: 475,584
18
  - **Organization**: LLM-course
19
 
20
  ## Model Details
21
 
22
  - **Architecture**: Chess Transformer (GPT-style)
23
  - **Vocab size**: 86
24
- - **Embedding dim**: 96
25
  - **Layers**: 6
26
  - **Heads**: 4
 
14
  ## Submission Info
15
 
16
  - **Submitted by**: [Drilly93](https://huggingface.co/Drilly93)
17
+ - **Parameters**: 937,600
18
  - **Organization**: LLM-course
19
 
20
  ## Model Details
21
 
22
  - **Architecture**: Chess Transformer (GPT-style)
23
  - **Vocab size**: 86
24
+ - **Embedding dim**: 128
25
  - **Layers**: 6
26
  - **Heads**: 4
config.json CHANGED
@@ -8,10 +8,10 @@
8
  "eos_token_id": 2,
9
  "layer_norm_epsilon": 1e-05,
10
  "model_type": "chess_transformer",
11
- "n_ctx": 192,
12
- "n_embd": 96,
13
  "n_head": 4,
14
- "n_inner": 192,
15
  "n_layer": 6,
16
  "pad_token_id": 0,
17
  "tie_weights": true,
 
8
  "eos_token_id": 2,
9
  "layer_norm_epsilon": 1e-05,
10
  "model_type": "chess_transformer",
11
+ "n_ctx": 256,
12
+ "n_embd": 128,
13
  "n_head": 4,
14
+ "n_inner": 320,
15
  "n_layer": 6,
16
  "pad_token_id": 0,
17
  "tie_weights": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7cef042c2edf862c94ec89087b56736afc19a2bdbbc053b691955ca74698e539
3
- size 1908680
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68bba7b575d04fdb9970c196c970e35e83dc8aab38b2fa08be483c13088ac904
3
+ size 3756848
tokenizer_config.json CHANGED
@@ -40,8 +40,5 @@
40
  "model_max_length": 1000000000000000019884624838656,
41
  "pad_token": "[PAD]",
42
  "tokenizer_class": "ChessTokenizer",
43
- "auto_map": {
44
- "AutoTokenizer": "tokenizer.ChessTokenizer"
45
- },
46
  "unk_token": "[UNK]"
47
  }
 
40
  "model_max_length": 1000000000000000019884624838656,
41
  "pad_token": "[PAD]",
42
  "tokenizer_class": "ChessTokenizer",
 
 
 
43
  "unk_token": "[UNK]"
44
  }