Ba2han committed · Commit d6f6c4f · verified · 1 Parent(s): b3d5f93

Training in progress, step 1793

README.md CHANGED
```diff
@@ -4,9 +4,9 @@ library_name: transformers
 model_name: m-lora-3
 tags:
 - generated_from_trainer
-- trl
 - unsloth
 - sft
+- trl
 licence: license
 ---
@@ -28,7 +28,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/batuhan409/huggingface/runs/btmz7cx0)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/batuhan409/huggingface/runs/4qg2fop8)
 
 
 This model was trained with SFT.
```
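The tag shuffle restores `trl` and the badge now points at a fresh Weights & Biases run, consistent with a TRL `SFTTrainer` job on an Unsloth-loaded model. A minimal sketch of such a setup follows; the base model name, dataset, and hyperparameters are placeholders, not taken from this repo:

```python
# Hypothetical sketch of the training setup implied by the trl/unsloth/sft
# tags; model name, dataset, and settings below are placeholders.
from unsloth import FastLanguageModel  # import unsloth first so its patches apply
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Unsloth loads the base model and tokenizer in one call.
model, tokenizer = FastLanguageModel.from_pretrained(
    "unsloth/Qwen2.5-7B",   # placeholder base model
    max_seq_length=4096,    # assumption
    load_in_4bit=True,      # assumption
)

dataset = load_dataset("json", data_files="train.jsonl", split="train")  # placeholder

trainer = SFTTrainer(
    model=model,
    processing_class=tokenizer,
    train_dataset=dataset,
    args=SFTConfig(
        output_dir="m-lora-3",
        report_to="wandb",  # emits the run badge linked above
    ),
)
trainer.train()
```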
adapter_config.json CHANGED
```diff
@@ -22,28 +22,25 @@
   "lora_dropout": 0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
-  "modules_to_save": [
-    "embed_tokens",
-    "lm_head"
-  ],
+  "modules_to_save": null,
   "peft_type": "LORA",
   "qalora_group_size": 16,
-  "r": 32,
+  "r": 128,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "up_proj",
+    "down_proj",
     "q_proj",
-    "gate_proj",
-    "o_proj",
     "k_proj",
-    "down_proj",
-    "up_proj",
-    "v_proj"
+    "o_proj",
+    "v_proj",
+    "gate_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
-  "use_dora": true,
+  "use_dora": false,
   "use_qalora": false,
-  "use_rslora": false
+  "use_rslora": true
 }
```
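Read as a PEFT config, the new hunk drops the full-weight `embed_tokens`/`lm_head` copies (`modules_to_save`), raises the LoRA rank from 32 to 128, disables DoRA, and enables rank-stabilized LoRA. A sketch of the equivalent `peft.LoraConfig`; `lora_alpha` sits outside this hunk, so its value here is a guess:

```python
# Sketch mirroring the new adapter_config.json values; lora_alpha is not
# visible in the diff hunk, so 32 below is an assumption.
from peft import LoraConfig

config = LoraConfig(
    task_type="CAUSAL_LM",
    r=128,                 # was 32
    lora_alpha=32,         # assumption: not shown in this hunk
    lora_dropout=0,
    use_dora=False,        # was true
    use_rslora=True,       # was false: scaling becomes lora_alpha / sqrt(r)
    modules_to_save=None,  # was ["embed_tokens", "lm_head"]
    target_modules=[
        "up_proj", "down_proj", "q_proj", "k_proj",
        "o_proj", "v_proj", "gate_proj",
    ],
)
```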
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb9178bcb130570f3c56276872aa0cf4b5263ecaeeb698f9be3af1a18b2c211e
-size 832034064
+oid sha256:4d25a547ed4d10b0cc6a3a698f830e443633a68cf58b5bdb730d00501bbdb83b
+size 218134048
```
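The adapter shrinks from ~832 MB to ~218 MB even though `r` quadrupled: the full-precision `embed_tokens` and `lm_head` copies saved under the old `modules_to_save` setting dwarf the low-rank matrices, and they are gone from the new checkpoint. One way to verify what a checkpoint actually stores, as a sketch against the `safetensors` API:

```python
# Sketch: list every tensor in the adapter checkpoint. In the old file,
# full copies of embed_tokens/lm_head would appear alongside the
# lora_A/lora_B pairs and account for most of the size.
from safetensors import safe_open

with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        t = f.get_tensor(name)
        print(f"{name}: shape={tuple(t.shape)}, dtype={t.dtype}")
```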
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aafdac56c2e106724ac5bea1f4e43a4d8db414f442eface15b2bf7c4461ccf48
+oid sha256:5e2c0fb7d887bf63501113ad3e27c7a29b06c831e81b3dc8ffceaf0731863714
 size 6225
```
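`training_args.bin` keeps its 6225-byte size; only the serialized hyperparameter values differ between the two runs. The file is a pickled `TrainingArguments` object, so it can be inspected directly, as in this sketch (only unpickle files you trust):

```python
# Sketch: unpickle training_args.bin to compare hyperparameters between
# commits. weights_only=False is required on recent PyTorch because this
# is a full pickle, not a plain tensor file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```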