iliasslasri committed (verified)
Commit 1635a64 · 1 Parent(s): f8d818a

Chess Challenge submission by iliasslasri

README.md CHANGED
@@ -17,10 +17,15 @@ Chess model submitted to the LLM Course Chess Challenge.
 - **Parameters**: 997,136
 - **Organization**: LLM-course
 
-## Model Details
+## Usage
 
-- **Architecture**: Chess Transformer (GPT-style)
-- **Vocab size**: 75
-- **Embedding dim**: 96
-- **Layers**: 11
-- **Heads**: 8
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained("LLM-course/chess-player-v2", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("LLM-course/chess-player-v2", trust_remote_code=True)
+```
+
+## Evaluation
+
+This model is evaluated at the [Chess Challenge Arena](https://huggingface.co/spaces/LLM-course/Chess1MChallenge).
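The Usage snippet added above only loads the model and tokenizer. A minimal sketch of how one might then query the model for a next move is shown below; the space-separated move notation, the prompt contents, and the generation settings are illustrative assumptions and are not specified anywhere in this commit.

```python
# Hypothetical follow-up to the Usage snippet above: ask the model for the next move.
# The move encoding (space-separated moves) is an assumption; the format actually
# expected by ChessTokenizer is defined in the repo's tokenizer.py, not in this diff.
prompt = "e2e4 e7e5 g1f3"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```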
config.json CHANGED
@@ -1,13 +1,13 @@
 {
-  "_name_or_path": "./gqa_1_ft_ft/checkpoint-794154/",
-  "architectures": [
-    "ChessForCausalLM"
-  ],
-  "attn": "GQA",
+  "_name_or_path": "/home/infres/lasri-22/llm-for-code-proof/chess_challenge/gqa_1_ft/checkpoint-742200",
   "auto_map": {
     "AutoConfig": "model.ChessConfig",
     "AutoModelForCausalLM": "model.ChessForCausalLM"
   },
+  "architectures": [
+    "ChessForCausalLM"
+  ],
+  "attn": "GQA",
   "bos_token_id": 1,
   "dropout": 0.1,
   "eos_token_id": 2,
@@ -27,4 +27,4 @@
   "torch_dtype": "float32",
   "transformers_version": "4.40.0",
   "vocab_size": 75
-}
+}
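The config change reorders keys (moving `architectures` and `attn` below `auto_map`) and points `_name_or_path` at a newer checkpoint; the architecture itself is unchanged. A quick way to confirm what actually gets loaded, assuming the custom `ChessConfig` in the repo's `model.py` exposes these fields as attributes:

```python
from transformers import AutoConfig

# auto_map points AutoConfig at model.ChessConfig, so trust_remote_code is required.
config = AutoConfig.from_pretrained("LLM-course/chess-player-v2", trust_remote_code=True)
print(config.attn, config.vocab_size)  # expected from config.json: "GQA", 75
```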
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df76bd301382f175badd06e1609f362bacfdee08862ea07b3ee6801e4bbc1d07
+oid sha256:aee9cd61343a4879c83371f6ad2fd879280120f3a42c6ab21681ac7746868ac1
 size 4003888
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7b8714f3635abcb248941a6a007b14cd4b814b97a0c068e03f81cce6384e28e
+size 8127546
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a25848b45ca278e89bdb13e222774072a3fc466089b2d55e83497369580356e5
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:707d7522d3f929df8c8f69fb3d717c8cf6659601e34dca8e76711b1e0884d7eb
+size 1064
tokenizer_config.json CHANGED
@@ -1,4 +1,10 @@
 {
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenizer.ChessTokenizer",
+      null
+    ]
+  },
   "added_tokens_decoder": {
     "0": {
       "content": "[PAD]",
@@ -33,12 +39,6 @@
       "special": true
     }
   },
-  "auto_map": {
-    "AutoTokenizer": [
-      "tokenizer.ChessTokenizer",
-      null
-    ]
-  },
   "bos_token": "[BOS]",
   "clean_up_tokenization_spaces": true,
   "eos_token": "[EOS]",
@@ -46,4 +46,4 @@
   "pad_token": "[PAD]",
   "tokenizer_class": "ChessTokenizer",
   "unk_token": "[UNK]"
-}
+}
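Moving `auto_map` earlier in tokenizer_config.json does not change behaviour (JSON key order is irrelevant); what matters is that the `AutoTokenizer` mapping lets `AutoTokenizer` resolve the custom `ChessTokenizer` class from the repo's `tokenizer.py`. A small sketch to verify which class is actually instantiated, assuming the repo id from the README:

```python
from transformers import AutoTokenizer

# trust_remote_code is needed because ChessTokenizer lives in the repo, not in transformers.
tokenizer = AutoTokenizer.from_pretrained("LLM-course/chess-player-v2", trust_remote_code=True)
print(type(tokenizer).__name__)  # expected: "ChessTokenizer"
```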
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60c348786093cffe343e5d1eb9c89033c14e0c9a3deca4ac6d97b16bd462dc61
+size 4920