Commit 24d2940 (verified) by jxta · 1 Parent(s): 3f5e8f1

Upload LoRA adapter (README written by author)

Files changed (3):
  1. README.md +2 -2
  2. adapter_config.json +4 -4
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -34,8 +34,8 @@ while intermediate reasoning (Chain-of-Thought) is masked.
 - Base model: Qwen/Qwen3-4B-Instruct-2507
 - Method: QLoRA (4-bit)
 - Max sequence length: 512
-- Epochs: 1
-- Learning rate: 1e-06
+- Epochs: 2
+- Learning rate: 2e-06
 - LoRA: r=64, alpha=128
 
 ## Usage
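
The `## Usage` section itself is not shown in this hunk; as a hedged sketch of how an adapter like this is typically loaded with PEFT on top of the base model named above (the adapter repo id below is a placeholder, not taken from this commit):

```python
# Minimal loading sketch; "jxta/<adapter-repo>" is a placeholder, not a real repo id
# from this commit. Replace it with the actual adapter repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen3-4B-Instruct-2507"    # base model named in the README
adapter_id = "jxta/<adapter-repo>"         # placeholder adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA weights

prompt = "Explain what a LoRA adapter is in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```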
adapter_config.json CHANGED
@@ -33,13 +33,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
+    "q_proj",
     "gate_proj",
-    "o_proj",
+    "down_proj",
+    "up_proj",
     "k_proj",
     "v_proj",
-    "q_proj",
-    "down_proj"
+    "o_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:020526ab299d86fe2e89f4baa14862ee2d122d238a47df3afa5815fabeef28c9
+oid sha256:bc96194f2a7ad45ea7f92ce3b147d4247a296a22bf6cc10e01b9b45404d9c995
 size 528550256
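
Only the Git LFS pointer changes here: the file size is identical, while the sha256 oid now records the new adapter weights. A small verification sketch (not part of the repo) for checking a downloaded `adapter_model.safetensors` against that oid:

```python
# Verify a downloaded adapter_model.safetensors against the sha256 oid in the LFS pointer.
import hashlib

expected = "bc96194f2a7ad45ea7f92ce3b147d4247a296a22bf6cc10e01b9b45404d9c995"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "file does not match the LFS pointer oid"
print("sha256 OK:", h.hexdigest())
```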