Ishaank18 committed
Commit 14fc59f · verified · 1 Parent(s): e476df1

Upload SFT model (train=1.5193, eval=0.0000)

Files changed (3):
  1. README.md +2 -2
  2. adapter_config.json +5 -5
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -30,9 +30,9 @@ Adapts explanations to different difficulty levels:
  | LoRA Rank | 16 |
  | LoRA Alpha | 32 |
  | Target Modules | q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj |
- | Epochs | 1 |
+ | Epochs | 3 |
  | Learning Rate | 0.0001 |
- | Final Train Loss | 1.6684 |
+ | Final Train Loss | 1.5193 |
  | Final Eval Loss | 0.0000 |

  ## Team
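
For reference, a minimal sketch of a PEFT `LoraConfig` consistent with the hyperparameters in the updated table; only the rank, alpha, and target modules come from this repo, and how the training run itself was configured (e.g. a `Trainer` with `num_train_epochs=3`, `learning_rate=1e-4`) is an assumption:

```python
# Minimal sketch of a LoRA configuration matching the README table.
# Only r, lora_alpha, and target_modules are taken from this commit;
# everything else here is an assumption about the setup.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,           # LoRA Rank
    lora_alpha=32,  # LoRA Alpha
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```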
adapter_config.json CHANGED
@@ -33,13 +33,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "k_proj",
  "q_proj",
- "v_proj",
- "down_proj",
- "o_proj",
+ "up_proj",
  "gate_proj",
- "up_proj"
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "down_proj"
  ],
  "target_parameters": null,
  "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d53beeb58d4d828a298374fcbe160f81d3640ead756919037356893c1b0c3021
+ oid sha256:092471a3668cc903a059b0998db1ba2f0849017775c1b937192c40996b0d5d60
  size 73911112
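
The weights file is stored via Git LFS: the pointer's `oid` is the SHA-256 of the file contents, so this commit swaps the weights while the size stays at 73911112 bytes. A minimal sketch of verifying a downloaded copy against the new pointer (the local path is an assumption):

```python
# Sketch: check a downloaded adapter_model.safetensors against the LFS
# pointer's sha256 oid. "adapter_model.safetensors" is assumed to be a
# local copy of the actual weights, not the pointer file itself.
import hashlib

expected = "092471a3668cc903a059b0998db1ba2f0849017775c1b937192c40996b0d5d60"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "file does not match the LFS pointer oid"
```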