Commit 7f16d8a (verified) by saidabizi
Parent(s): 4758cc4

End of training

README.md CHANGED
@@ -29,18 +29,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/bizisaida04-fisher-college/huggingface/runs/vk1row6j)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/bizisaida04-fisher-college/huggingface/runs/smkafpu0)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.15.2
-- Transformers: 4.48.3
-- Pytorch: 2.5.1+cu124
-- Datasets: 3.3.2
-- Tokenizers: 0.21.0
+- TRL: 0.17.0
+- Transformers: 4.51.3
+- Pytorch: 2.6.0+cu124
+- Datasets: 3.5.1
+- Tokenizers: 0.21.1
 
 ## Citations
 
@@ -51,7 +51,7 @@ Cite TRL as:
 ```bibtex
 @misc{vonwerra2022trl,
     title = {{TRL: Transformer Reinforcement Learning}},
-    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
     year = 2020,
     journal = {GitHub repository},
     publisher = {GitHub},
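
The README's usage snippet (its hunk header ends in `print(output["generated_text"])`) can be reproduced end to end as a post-push sanity check. A minimal sketch; the repo id below is a hypothetical placeholder, not the actual path of this model on the Hub:

```python
# Sketch of the README's pipeline usage. The repo id is an assumption:
# substitute this model's real Hub path.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="saidabizi/SmolLM2-135M-sft",  # hypothetical repo id
)
output = generator("Write a short greeting.", max_new_tokens=50)[0]
print(output["generated_text"])
```
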
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "HuggingFaceTB/SmolLM2-135M",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -27,7 +26,7 @@
   "rope_theta": 100000,
   "tie_word_embeddings": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.48.3",
+  "transformers_version": "4.51.3",
   "use_cache": true,
   "vocab_size": 49152
 }
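
Since this commit drops `_name_or_path` and bumps `transformers_version`, a quick way to confirm what a checkout actually carries is to read the file directly. A minimal sketch, assuming a local clone of the repo:

```python
# Sketch: check the committed config.json fields directly.
import json

with open("config.json") as f:  # assumes a local checkout of the repo
    cfg = json.load(f)

print(cfg["transformers_version"])  # "4.51.3" after this commit
print("_name_or_path" in cfg)       # False: the key was dropped here
```
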
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 2,
-  "transformers_version": "4.48.3"
+  "transformers_version": "4.51.3"
 }
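
Only `transformers_version` changes here; the token ids stay put even though the tokenizer files below rework the special tokens. A sketch for cross-checking that the two stay in sync, assuming a local checkout:

```python
# Sketch: cross-check generation_config ids against the tokenizer.
from transformers import AutoTokenizer, GenerationConfig

tok = AutoTokenizer.from_pretrained(".")   # assumes a local checkout
gen = GenerationConfig.from_pretrained(".")

print(gen.bos_token_id, tok.bos_token_id)  # should agree if configs are in sync
print(gen.eos_token_id, tok.eos_token_id)
print(gen.pad_token_id, tok.pad_token_id)
```
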
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c40220acb710b334bb2bade89e9fe6bdcbd7a1e99fad1e5a7637e6487b4f0263
+oid sha256:17f2f5309442bd65386c8c1549b58fd9193fd2102a777b72eb9ad972d9694e81
 size 538090408
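
The weights are stored through Git LFS, so the diff only shows the pointer (hash and size). A minimal sketch for verifying a downloaded file against the new pointer's oid, assuming a full (smudged) checkout:

```python
# Sketch: verify an LFS-tracked file against its sha256 pointer.
import hashlib

EXPECTED = "17f2f5309442bd65386c8c1549b58fd9193fd2102a777b72eb9ad972d9694e81"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # assumes the real file, not the pointer
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("model.safetensors matches the LFS pointer")
```
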
runs/Apr29_22-20-05_0e5f27e20233/events.out.tfevents.1745965211.0e5f27e20233.608.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78424f3684447705bab8bb5b5a8aa97697fac8862ea12df4d04284250c0bf80d
+size 22336
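
The new tfevents file is the TensorBoard log for this run. A sketch for listing what it contains, assuming the `tensorboard` package is installed; the `train/loss` tag is a guess at what the Trainer logged, not something the diff shows:

```python
# Sketch: inspect the committed TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Apr29_22-20-05_0e5f27e20233")
acc.Reload()
print(acc.Tags()["scalars"])  # list the scalar tags actually logged

# "train/loss" is an assumed tag name, not shown in the diff:
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```
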
special_tokens_map.json CHANGED
@@ -1,23 +1,37 @@
 {
   "additional_special_tokens": [
-    {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    }
+    "<|endoftext|>",
+    "<|im_start|>",
+    "<|im_end|>",
+    "<repo_name>",
+    "<reponame>",
+    "<file_sep>",
+    "<filename>",
+    "<gh_stars>",
+    "<issue_start>",
+    "<issue_comment>",
+    "<issue_closed>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<jupyter_script>",
+    "<empty_output>"
   ],
-  "bos_token": "<|im_start|>",
-  "eos_token": "<|im_end|>",
-  "pad_token": "<|im_end|>",
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -139,16 +139,29 @@
     }
   },
   "additional_special_tokens": [
+    "<|endoftext|>",
     "<|im_start|>",
-    "<|im_end|>"
+    "<|im_end|>",
+    "<repo_name>",
+    "<reponame>",
+    "<file_sep>",
+    "<filename>",
+    "<gh_stars>",
+    "<issue_start>",
+    "<issue_comment>",
+    "<issue_closed>",
+    "<jupyter_start>",
+    "<jupyter_text>",
+    "<jupyter_code>",
+    "<jupyter_output>",
+    "<jupyter_script>",
+    "<empty_output>"
   ],
-  "bos_token": "<|im_start|>",
-  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|im_end|>",
+  "eos_token": "<|endoftext|>",
   "extra_special_tokens": {},
   "model_max_length": 8192,
-  "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f68d1d4171cae1c51c5b07d7a43f60f1e391e85afde25806ae18fc3159392cf
-size 5560
+oid sha256:7987155c04ccd877ada591a422e9c3d8157bb85963cd1eb7da8e47d3a075cd50
+size 5688
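
`training_args.bin` is a pickled training-arguments object, so only its LFS hash and size change in the diff. A sketch for peeking inside, noting that unpickling needs compatible trl/transformers versions (see the README's framework versions) and `weights_only=False`:

```python
# Sketch: load the pickled training arguments for inspection.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. SFTConfig (assumption: a TRL SFT run)
print(args.output_dir, args.num_train_epochs)
```
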