rayymaxx committed
Commit 9ddb944 · verified · Parent(s): 886cc19

Training in progress, epoch 1

README.md CHANGED
@@ -5,8 +5,8 @@ model_name: DirectEd-AI-LoRA
 tags:
 - generated_from_trainer
 - unsloth
-- sft
 - trl
+- sft
 licence: license
 ---
 
@@ -28,7 +28,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/rayymondodhiambo-direct-ed/huggingface/runs/nw48kwk6)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/rayymondodhiambo-direct-ed/huggingface/runs/dsz5n99x)
 
 
 This model was trained with SFT.
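The tags (`unsloth`, `trl`, `sft`) and the W&B badge indicate the adapter was trained with TRL's supervised fine-tuning. A minimal sketch of what such a run could look like; the base model and dataset below are placeholders, not taken from this repository — only the LoRA hyperparameters come from this commit's adapter_config.json:

```python
# Hedged sketch of an SFT run with TRL + PEFT; model and dataset names
# are placeholders, not the ones actually used for DirectEd-AI-LoRA.
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

train_ds = load_dataset("timdettmers/openassistant-guanaco", split="train")  # placeholder

trainer = SFTTrainer(
    model="meta-llama/Llama-3.2-1B",  # placeholder: actual base model unknown
    args=SFTConfig(
        output_dir="DirectEd-AI-LoRA",
        num_train_epochs=1,   # commit message: "Training in progress, epoch 1"
        report_to="wandb",    # logs the run linked by the README badge
    ),
    train_dataset=train_ds,
    peft_config=LoraConfig(r=8, lora_alpha=16, lora_dropout=0.05),
)
trainer.train()
```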
adapter_config.json CHANGED
@@ -15,23 +15,23 @@
   "loftq_config": {},
   "lora_alpha": 16,
   "lora_bias": false,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "qalora_group_size": 16,
-  "r": 16,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "k_proj",
+    "q_proj",
     "gate_proj",
     "up_proj",
-    "o_proj",
-    "q_proj",
     "v_proj",
-    "down_proj",
-    "k_proj"
+    "o_proj",
+    "down_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:decdeef352fbb944cf9c0c89b258f475a8f3732412ae0339320a247d5b3b219a
-size 167832240
+oid sha256:fce204dbde8c95ecb379a6057e4defa0af52f05622533787da96112cd94f1fd8
+size 83945296
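The adapter file shrinking from ~168 MB to ~84 MB matches the rank change: each adapted weight matrix of shape (d_out, d_in) gains LoRA factors totalling r·(d_in + d_out) parameters, so adapter size scales linearly with r, and halving r halves the file. A quick check (the small excess over exactly half is safetensors header/metadata):

```python
# Sanity check: LoRA adapter bytes scale linearly in rank r, so r 16 -> 8
# should roughly halve the file. Sizes come from the LFS pointers above.
old_size, new_size = 167_832_240, 83_945_296
print(new_size / old_size)  # -> ~0.5002
```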
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a0eda646b7c5f41999cd0085002feca3955366d360842a8d838f298d30e477af
+oid sha256:b6b37da950f207ec10e24ffde6e97ff9e575c996e76fd531b2eb294ac69919b0
 size 6161