weathon committed
Commit f5b2ac7 · verified · 1 Parent(s): 2f5592e

Training in progress, step 50

README.md CHANGED
@@ -4,8 +4,8 @@ library_name: transformers
  model_name: number
  tags:
  - generated_from_trainer
- - trl
  - sft
+ - trl
  licence: license
  ---

@@ -27,7 +27,7 @@ print(output["generated_text"])

  ## Training procedure

- [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/3dsmile/huggingface/runs/x4b870bi)
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/3dsmile/huggingface/runs/0b6v7xuv)


  This model was trained with SFT.
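The hunk header above shows the card's usage snippet ends in `print(output["generated_text"])`, which matches the standard TRL SFT model-card pattern. A minimal sketch of that pattern, assuming the repo id is `weathon/number` (inferred from the commit author and `model_name: number`, not confirmed by the diff) and that peft is installed so the adapter resolves onto its base model:

```python
# Generation sketch in the style of TRL's SFT model card.
# "weathon/number" is an assumed repo id; substitute the actual one.
from transformers import pipeline

generator = pipeline("text-generation", model="weathon/number", device_map="auto")
question = "What is 17 + 25?"  # placeholder prompt
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=64,
    return_full_text=False,
)[0]
print(output["generated_text"])
```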
adapter_config.json CHANGED
@@ -16,7 +16,7 @@
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
- "lora_alpha": 4,
+ "lora_alpha": 8,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
@@ -25,14 +25,13 @@
  "peft_type": "LORA",
  "peft_version": "0.18.1",
  "qalora_group_size": 16,
- "r": 4,
+ "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "down_proj",
  "v_proj",
  "q_proj",
- "up_proj"
+ "o_proj"
  ],
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1be92080d3da3c8370d658e70409ce98d9c469dbf75c4beafa409bcab1256e2c
- size 5308224
+ oid sha256:b1d8279786f7f78e61d933bfa2807df8248997d6c9c5b5fddf43f9dc2dcc77f9
+ size 5071040
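The new weight file is slightly smaller (5,071,040 vs 5,308,224 bytes) even though the rank doubled, which is consistent with dropping the wider MLP projections in favor of o_proj. A minimal sketch for inspecting an adapter checkpoint like this one, assuming adapter_model.safetensors has been downloaded locally:

```python
# Tally LoRA tensor shapes and parameter counts in the adapter file.
from safetensors import safe_open

total = 0
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        tensor = f.get_tensor(name)
        total += tensor.numel()
        print(f"{name}: {tuple(tensor.shape)}")
print(f"total parameters: {total}")
```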
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:053c3229cb7e701a375748667df7194ea6ccf5f8f069762e644901452c0b09e1
+ oid sha256:adbc83b8fc704a6a56308798467ddb8b274b083f9cedee2438d5ee3e87635d01
  size 5649
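training_args.bin is the pickled `TrainingArguments` object that `Trainer` saves alongside checkpoints; only its hash changed here, so the size stays at 5649 bytes. A minimal sketch for inspecting it, assuming a local copy from a trusted source (it is a pickle, so recent PyTorch requires `weights_only=False`):

```python
# Load and print the saved TrainingArguments.
# Only do this for trusted files: weights_only=False
# unpickles arbitrary Python objects.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args)  # a transformers TrainingArguments instance
```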