splusminusx committed
Commit e793eec · verified · 1 Parent(s): 4e7f599

End of training

README.md CHANGED

@@ -6,8 +6,8 @@ tags:
 - generated_from_trainer
 - smol-course
 - module_1
-- trl
 - sft
+- trl
 licence: license
 ---
 
@@ -29,17 +29,18 @@ print(output["generated_text"])
 
 ## Training procedure
 
+
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.12.1
-- Transformers: 4.46.3
-- Pytorch: 2.5.1
-- Datasets: 3.1.0
-- Tokenizers: 0.20.3
+- TRL: 0.23.0
+- Transformers: 4.56.1
+- Pytorch: 2.8.0+cu126
+- Datasets: 4.0.0
+- Tokenizers: 0.22.0
 
 ## Citations
 
@@ -50,7 +51,7 @@ Cite TRL as:
 ```bibtex
 @misc{vonwerra2022trl,
     title = {{TRL: Transformer Reinforcement Learning}},
-    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
     year = 2020,
     journal = {GitHub repository},
     publisher = {GitHub},
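For context, the second hunk's anchor line, `print(output["generated_text"])`, is the tail of the card's quick-start snippet, which follows the standard TRL model-card pattern. A minimal sketch of that pattern; the repo id is a placeholder, since the diff does not name this repository:

```python
# Sketch of the quick-start pattern implied by the hunk context.
# "splusminusx/<model-repo>" is a placeholder, not taken from the diff.
from transformers import pipeline

generator = pipeline("text-generation", model="splusminusx/<model-repo>")
output = generator(
    [{"role": "user", "content": "What is supervised fine-tuning?"}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```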
adapter_config.json ADDED

@@ -0,0 +1,42 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "HuggingFaceTB/SmolLM2-135M",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 8,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 6,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "down_proj",
+    "gate_proj",
+    "up_proj",
+    "v_proj",
+    "q_proj",
+    "k_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
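This config describes a LoRA adapter (r=6, lora_alpha=8, dropout 0.05) over all attention and MLP projections of HuggingFaceTB/SmolLM2-135M. A minimal sketch of attaching it with peft; the adapter repo id is a placeholder:

```python
# Sketch: attach the LoRA adapter described above to its declared base model.
# "splusminusx/<adapter-repo>" is a placeholder for this repository's id.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-135M")
model = PeftModel.from_pretrained(base, "splusminusx/<adapter-repo>")

# For inference only, the r=6 / alpha=8 deltas can be folded into the base weights.
model = model.merge_and_unload()
```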
adapter_model.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8af0015e92ea6bb474af910ebaf2b06138bfbab989324e81a5b6f817c029e40a
+size 7380952
chat_template.jinja ADDED

@@ -0,0 +1,4 @@
+{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %}
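The template is ChatML-style: each message is wrapped as `<|im_start|>{role}\n{content}<|im_end|>\n`, with an optional trailing assistant header. A sketch of rendering it through the tokenizer (placeholder repo id again):

```python
# Sketch: render the ChatML template above via the tokenizer.
# "splusminusx/<adapter-repo>" is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("splusminusx/<adapter-repo>")
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```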
tokenizer.json CHANGED

@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
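This hunk removes the truncation policy (right-side, max_length 1024, LongestFirst) that had been baked into the serialized tokenizer, leaving truncation to the caller. If the old behavior is wanted, it can be restored at runtime; a sketch using the standalone tokenizers library, assuming a local copy of the file:

```python
# Sketch: re-enable, at runtime, the truncation policy this diff removes.
# Assumes tokenizer.json has been downloaded locally.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
tok.enable_truncation(
    max_length=1024,
    strategy="longest_first",
    direction="right",
    stride=0,
)
```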
tokenizer_config.json CHANGED

@@ -143,9 +143,9 @@
     "<|im_end|>"
   ],
   "bos_token": "<|im_start|>",
-  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
+  "extra_special_tokens": {},
   "model_max_length": 8192,
   "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b9e0a3754ccf8fbebd0fa964a4c1ba245aa1823218b611600c5585bc5d67f70
-size 5560
+oid sha256:efaec4a1e3ecc81ac60e0bbf39237b8224b6e2f03c2e843bf85688cf16943f13
+size 6225
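training_args.bin is the pickled training-arguments object that the Trainer saves alongside a run, so the size change (5560 → 6225 bytes) is consistent with the newer TRL's config carrying more fields. A sketch of inspecting it, assuming a local download; note that torch ≥ 2.6 defaults to weights_only=True, which must be overridden for pickled dataclasses:

```python
# Sketch: inspect the pickled training arguments. Assumes training_args.bin
# is local and compatible trl/transformers versions are installed.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)                        # e.g. SFTConfig
print(args.learning_rate, args.num_train_epochs)  # a couple of common fields
```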