ikerm11 committed
Commit 71daa84 · verified · 1 Parent(s): 766a4fe

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,3 +1,43 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: apache-2.0
+ tags:
+ - trl
+ - ppo
+ - transformers
+ - reinforcement-learning
+ ---
+
+ # TRL Model
+
+ This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
+ guide the model outputs according to a value function or human feedback. The model can be used for text generation.
+
+ ## Usage
+
+ To use this model for inference, first install the TRL library:
+
+ ```bash
+ python -m pip install trl
+ ```
+
+ You can then generate text as follows:
+
+ ```python
+ from transformers import pipeline
+
+ generator = pipeline("text-generation", model="ikerm11/gemma1b_humanizer_lora")
+ outputs = generator("Hello, my llama is cute")
+ ```
+
+ If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:
+
+ ```python
+ from transformers import AutoTokenizer
+ from trl import AutoModelForCausalLMWithValueHead
+
+ tokenizer = AutoTokenizer.from_pretrained("ikerm11/gemma1b_humanizer_lora")
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("ikerm11/gemma1b_humanizer_lora")
+
+ inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
+ outputs = model(**inputs, labels=inputs["input_ids"])
+ ```
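
Note that `adapter_config.json` below identifies this repository as a LoRA adapter on `google/gemma-3-1b-it`, so the README snippets above rely on `transformers`/TRL resolving the adapter automatically, which requires `peft` to be installed. A minimal sketch of loading the adapter explicitly instead (model ids taken from this commit; the prompt and generation length are illustrative):

```python
# Minimal sketch: load the base model and apply this repo's LoRA adapter
# explicitly via peft, rather than relying on automatic adapter resolution.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
model = PeftModel.from_pretrained(base, "ikerm11/gemma1b_humanizer_lora")
tokenizer = AutoTokenizer.from_pretrained("ikerm11/gemma1b_humanizer_lora")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)  # illustrative length
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```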
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-3-1b-it",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
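
For reference, a sketch of the `peft` `LoraConfig` that corresponds to the `adapter_config.json` above. You would only need this to re-create the adapter from scratch; loading the saved adapter with `PeftModel.from_pretrained` reads the file directly:

```python
# Sketch: a LoraConfig mirroring adapter_config.json above (illustrative).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # LoRA rank
    lora_alpha=16,                        # scaling factor
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # attention query/value projections
    bias="none",
    task_type="CAUSAL_LM",
)
```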
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb7f816624ef11c1e18feb6184cf6456ae5fb1167c379f294ac662749639cb71
+ size 2995512
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "accelerator_kwargs": {},
+   "adap_kl_ctrl": true,
+   "backward_batch_size": 1,
+   "batch_size": 2,
+   "cliprange": 0.2,
+   "cliprange_value": 0.2,
+   "compare_steps": 1,
+   "early_stopping": false,
+   "exp_name": "colab_kernel_launcher",
+   "forward_batch_size": null,
+   "gamma": 1,
+   "global_backward_batch_size": 1,
+   "global_batch_size": 2,
+   "gradient_accumulation_steps": 1,
+   "horizon": 10000,
+   "init_kl_coef": 0.1,
+   "is_encoder_decoder": false,
+   "is_peft_model": true,
+   "kl_penalty": "kl",
+   "lam": 0.95,
+   "learning_rate": 5e-06,
+   "log_with": null,
+   "max_grad_norm": null,
+   "mini_batch_size": 1,
+   "model_name": "gpt2",
+   "optimize_cuda_cache": null,
+   "optimize_device_cache": false,
+   "ppo_epochs": 2,
+   "project_kwargs": {},
+   "push_to_hub_if_best_kwargs": {},
+   "query_dataset": "imdb",
+   "ratio_threshold": 10.0,
+   "remove_unused_columns": false,
+   "reward_model": "sentiment-analysis:lvwerra/distilbert-imdb",
+   "score_clip": null,
+   "seed": 0,
+   "steps": 20000,
+   "target": 6,
+   "target_kl": 0.1,
+   "task_name": null,
+   "tracker_kwargs": {},
+   "tracker_project_name": "trl",
+   "use_score_norm": false,
+   "use_score_scaling": false,
+   "vf_coef": 0.1,
+   "whiten_rewards": false,
+   "world_size": 1
+ }
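
Note that this `config.json` is a dump of TRL's `PPOConfig` rather than a `transformers` model config, and the `model_name` ("gpt2") and `query_dataset` ("imdb") fields appear to be TRL example defaults left in place rather than the actual Gemma setup. A sketch of the key hyperparameters expressed with the legacy (pre-0.12) `trl.PPOConfig` API, with field names taken from the JSON above; newer TRL releases replaced this API, so treat it as documentation of the run, not a drop-in recipe:

```python
# Sketch: key PPO hyperparameters from config.json above, using the
# legacy trl.PPOConfig (field names assumed from the dump; the PPOTrainer
# API changed in later TRL versions).
from trl import PPOConfig

ppo_config = PPOConfig(
    learning_rate=5e-6,
    batch_size=2,
    mini_batch_size=1,
    gradient_accumulation_steps=1,
    ppo_epochs=2,
    init_kl_coef=0.1,
    adap_kl_ctrl=True,
    target=6,            # adaptive KL controller target
    target_kl=0.1,       # early-stopping KL threshold
    cliprange=0.2,
    cliprange_value=0.2,
    gamma=1.0,
    lam=0.95,
    vf_coef=0.1,
    seed=0,
)
```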
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f82f888037ba0a99e9f3b14188148e2edbb621482e802f37fe8ff617f65caf
+ size 6140
special_tokens_map.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": "<eos>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff