tmhdnishimura committed
Commit b4f27a6 · verified · 1 Parent(s): 6f2eb72

Upload LoRA adapter (README written by author)

Files changed (3)
  1. README.md +1 -1
  2. adapter_config.json +2 -2
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -35,7 +35,7 @@ while intermediate reasoning (Chain-of-Thought) is masked.
 - Method: QLoRA (4-bit)
 - Max sequence length: 512
 - Epochs: 1
-- Learning rate: 1e-05
+- Learning rate: 1e-04
 - LoRA: r=64, alpha=128
 
 ## Usage
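The README change raises the learning rate from 1e-05 to 1e-04; the other hyperparameters stay the same. As a minimal sketch (not the author's actual training script), these settings would map onto a QLoRA setup with the `transformers`, `bitsandbytes`, and `peft` libraries roughly as follows. The base model id and the nf4/bfloat16 quantization details are placeholders and assumptions not stated in this commit; the target modules are taken from adapter_config.json below.

```python
# Minimal sketch of a QLoRA setup matching the README hyperparameters.
# Assumes `transformers`, `bitsandbytes`, and `peft`; the base model id is a placeholder.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

bnb_config = BitsAndBytesConfig(           # "Method: QLoRA (4-bit)"
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # assumed; not stated in the commit
    bnb_4bit_compute_dtype=torch.bfloat16, # assumed; not stated in the commit
)

lora_config = LoraConfig(                  # "LoRA: r=64, alpha=128"
    r=64,
    lora_alpha=128,
    target_modules=["up_proj", "q_proj", "v_proj", "k_proj",
                    "o_proj", "gate_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",                       # placeholder: the base model is not named in this diff
    quantization_config=bnb_config,
)
model = get_peft_model(base, lora_config)
# Training would then run for 1 epoch at learning rate 1e-04 with a
# maximum sequence length of 512, per the updated README.
```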
adapter_config.json CHANGED
@@ -35,10 +35,10 @@
   "target_modules": [
     "up_proj",
     "q_proj",
-    "gate_proj",
     "v_proj",
-    "o_proj",
     "k_proj",
+    "o_proj",
+    "gate_proj",
     "down_proj"
   ],
   "target_parameters": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:632feb1a59bcedd57367cb92151a276624dc8a4605d0741b6275562eb92b36b1
+oid sha256:7dc2bc8e8e4ae9717b8b470a855ec099f39223695e80f2be3ab750210322e8a0
 size 528550256
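The adapter_model.safetensors entry is a Git LFS pointer: the file size is unchanged and only the sha256 content hash changes. A quick sketch (assuming a locally downloaded copy at a placeholder path) for checking a download against the new pointer hash:

```python
# Verify a downloaded adapter_model.safetensors against the LFS pointer's sha256.
# The local path is a placeholder.
import hashlib

expected = "7dc2bc8e8e4ae9717b8b470a855ec099f39223695e80f2be3ab750210322e8a0"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

print("hash matches pointer:", h.hexdigest() == expected)
```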