Baon2024 committed on
Commit
bafc364
·
verified ·
1 Parent(s): 7502aff

Baon2024/SmolLM-360M-Instruct-chris-data-0.1

Browse files
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: HuggingFaceTB/SmolLM2-360M-Instruct
3
+ library_name: transformers
4
+ model_name: trainer_output
5
+ tags:
6
+ - generated_from_trainer
7
+ - trackio
8
+ - sft
9
+ - trl
10
+ - trackio:https://huggingface.co/spaces/Baon2024/trackio
11
+ - hf_jobs
12
+ license: license
13
+ ---
14
+
15
+ # Model Card for trainer_output
16
+
17
+ This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-360M-Instruct).
18
+ It has been trained using [TRL](https://github.com/huggingface/trl).
19
+
20
+ ## Quick start
21
+
22
+ ```python
23
+ from transformers import pipeline
24
+
25
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
26
+ generator = pipeline("text-generation", model="Baon2024/trainer_output", device="cuda")
27
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
28
+ print(output["generated_text"])
29
+ ```
30
+
31
+ ## Training procedure
32
+
33
+
34
+
35
+
36
+ This model was trained with SFT.
37
+
38
+ ### Framework versions
39
+
40
+ - TRL: 0.28.0
41
+ - Transformers: 5.1.0
42
+ - Pytorch: 2.10.0
43
+ - Datasets: 4.5.0
44
+ - Tokenizers: 0.22.2
45
+
46
+ ## Citations
47
+
48
+
49
+
50
+ Cite TRL as:
51
+
52
+ ```bibtex
53
+ @software{vonwerra2020trl,
54
+ title = {{TRL: Transformers Reinforcement Learning}},
55
+ author = {von Werra, Leandro and Belkada, Younes and Tunstall, Lewis and Beeching, Edward and Thrush, Tristan and Lambert, Nathan and Huang, Shengyi and Rasul, Kashif and Gallouédec, Quentin},
56
+ license = {Apache-2.0},
57
+ url = {https://github.com/huggingface/trl},
58
+ year = {2020}
59
+ }
60
+ ```
chat_template.jinja ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
2
+ You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
3
+ ' }}{% endif %}{{'<|im_start|>' + message['role'] + '
4
+ ' + message['content'] + '<|im_end|>' + '
5
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
6
+ ' }}{% endif %}
config.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "LlamaForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "dtype": "float16",
9
+ "eos_token_id": 2,
10
+ "head_dim": 64,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 960,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 2560,
15
+ "is_llama_config": true,
16
+ "max_position_embeddings": 8192,
17
+ "mlp_bias": false,
18
+ "model_type": "llama",
19
+ "num_attention_heads": 15,
20
+ "num_hidden_layers": 32,
21
+ "num_key_value_heads": 5,
22
+ "pad_token_id": 2,
23
+ "pretraining_tp": 1,
24
+ "rms_norm_eps": 1e-05,
25
+ "rope_interleaved": false,
26
+ "rope_parameters": {
27
+ "rope_theta": 100000,
28
+ "rope_type": "default"
29
+ },
30
+ "tie_word_embeddings": true,
31
+ "transformers.js_config": {
32
+ "kv_cache_dtype": {
33
+ "fp16": "float16",
34
+ "q4f16": "float16"
35
+ }
36
+ },
37
+ "transformers_version": "5.1.0",
38
+ "use_cache": false,
39
+ "vocab_size": 49152
40
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": [
5
+ 2
6
+ ],
7
+ "pad_token_id": 2,
8
+ "transformers_version": "5.1.0"
9
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed172e28bef9395097d990c05bb8552667799222410b89c60e516236ef741a61
3
+ size 723674624
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": "<|im_start|>",
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|im_end|>",
7
+ "extra_special_tokens": [
8
+ "<|im_start|>",
9
+ "<|im_end|>"
10
+ ],
11
+ "is_local": false,
12
+ "model_max_length": 8192,
13
+ "pad_token": "<|im_end|>",
14
+ "tokenizer_class": "TokenizersBackend",
15
+ "unk_token": "<|endoftext|>",
16
+ "vocab_size": 49152
17
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f53de7a393d914653d9a4350032a1eed6198c39fa79c278047558b125f22bd06
3
+ size 5649