ssgplabon committed on
Commit
a8bc012
·
verified ·
1 Parent(s): 65e6aac

RLHF model of StarCoder

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: bigcode/tiny_starcoder_py
3
+ library_name: transformers
4
+ model_name: tinystarcoder-rlhf-model
5
+ tags:
6
+ - generated_from_trainer
7
+ - reward-trainer
8
+ - trl
9
+ licence: license
10
+ ---
11
+
12
+ # Model Card for tinystarcoder-rlhf-model
13
+
14
+ This model is a fine-tuned version of [bigcode/tiny_starcoder_py](https://huggingface.co/bigcode/tiny_starcoder_py).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ text = "The capital of France is Paris."
23
+ rewarder = pipeline(model="ssgplabon/tinystarcoder-rlhf-model", device="cuda")
24
+ output = rewarder(text)[0]
25
+ print(output["score"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+
31
+
32
+
33
 + This model was trained with the Reward Trainer.
34
+
35
+ ### Framework versions
36
+
37
+ - TRL: 0.27.2
38
+ - Transformers: 5.0.0
39
+ - Pytorch: 2.9.0+cu126
40
+ - Datasets: 4.0.0
41
+ - Tokenizers: 0.22.2
42
+
43
+ ## Citations
44
+
45
+
46
+
47
+ Cite TRL as:
48
+
49
+ ```bibtex
50
+ @misc{vonwerra2022trl,
51
+ title = {{TRL: Transformer Reinforcement Learning}},
52
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
53
+ year = 2020,
54
+ journal = {GitHub repository},
55
+ publisher = {GitHub},
56
+ howpublished = {\url{https://github.com/huggingface/trl}}
57
+ }
58
+ ```
config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_pytorch_tanh",
3
+ "add_cross_attention": false,
4
+ "architectures": [
5
+ "GPTBigCodeForCausalLM"
6
+ ],
7
+ "attention_softmax_in_fp32": true,
8
+ "attn_pdrop": 0.1,
9
+ "bos_token_id": 0,
10
+ "dtype": "float32",
11
+ "embd_pdrop": 0.1,
12
+ "eos_token_id": 0,
13
+ "inference_runner": 0,
14
+ "initializer_range": 0.02,
15
+ "layer_norm_epsilon": 1e-05,
16
+ "max_batch_size": null,
17
+ "max_sequence_length": null,
18
+ "model_type": "gpt_bigcode",
19
+ "multi_query": true,
20
+ "n_embd": 768,
21
+ "n_head": 12,
22
+ "n_inner": 3072,
23
+ "n_layer": 20,
24
+ "n_positions": 8192,
25
+ "num_key_value_heads": 1,
26
+ "pad_key_length": true,
27
+ "pad_token_id": 49152,
28
+ "pre_allocate_kv_cache": false,
29
+ "resid_pdrop": 0.1,
30
+ "scale_attention_softmax_in_fp32": true,
31
+ "scale_attn_weights": true,
32
+ "summary_activation": null,
33
+ "summary_first_dropout": 0.1,
34
+ "summary_proj_to_labels": true,
35
+ "summary_type": "cls_index",
36
+ "summary_use_proj": true,
37
+ "tie_word_embeddings": true,
38
+ "transformers_version": "5.0.0",
39
+ "use_cache": false,
40
+ "validate_runner_input": true,
41
+ "vocab_size": 49153
42
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": [
5
+ 0
6
+ ],
7
+ "pad_token_id": 49152,
8
+ "transformers_version": "5.0.0"
9
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8113d37c91a4c332ae7b2b840c44a48b04c8efe2d74b9a52fd1118d8d9266b65
3
+ size 656604376
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": "<|endoftext|>",
5
+ "eos_token": "<|endoftext|>",
6
+ "errors": "replace",
7
+ "extra_special_tokens": [
8
+ "<|endoftext|>",
9
+ "<fim_prefix>",
10
+ "<fim_middle>",
11
+ "<fim_suffix>",
12
+ "<fim_pad>",
13
+ "<filename>",
14
+ "<gh_stars>",
15
+ "<issue_start>",
16
+ "<issue_comment>",
17
+ "<issue_closed>",
18
+ "<jupyter_start>",
19
+ "<jupyter_text>",
20
+ "<jupyter_code>",
21
+ "<jupyter_output>",
22
+ "<empty_output>",
23
+ "<commit_before>",
24
+ "<commit_msg>",
25
+ "<commit_after>",
26
+ "<reponame>"
27
+ ],
28
+ "is_local": false,
29
+ "model_max_length": 1000000000000000019884624838656,
30
+ "pad_token": "[PAD]",
31
+ "tokenizer_class": "GPT2Tokenizer",
32
+ "unk_token": "<|endoftext|>",
33
+ "vocab_size": 49152
34
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c604c5f4a28ab90ff4542c0234fe0927302ff071ab2de72e61bc0d5380e62a2
3
+ size 5393