morisue committed
Commit bf3a123 · verified · 1 Parent(s): a81463e

Upload LoRA adapter (README written by author)

Files changed (3)
  1. README.md +4 -5
  2. adapter_config.json +5 -5
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -13,8 +13,7 @@ tags:
 - structured-output
 ---
 
-This LoRA adapter enhances structured output accuracy
-(JSON, YAML, XML, TOML, CSV) for Qwen3-4B-Instruct.
+<[Assignment] Please fill in this section yourself>
 
 This repository provides a **LoRA adapter** fine-tuned from
 **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**.
@@ -35,9 +34,9 @@ while intermediate reasoning (Chain-of-Thought) is masked.
 - Base model: Qwen/Qwen3-4B-Instruct-2507
 - Method: QLoRA (4-bit)
 - Max sequence length: 512
-- Epochs: 1
-- Learning rate: 1e-06
-- LoRA: r=64, alpha=128
+- Epochs: 2
+- Learning rate: 5e-06
+- LoRA: r=128, alpha=256
 
 ## Usage
 
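The visible hunks cut off before the README's ## Usage body, so here is a minimal sketch of the standard transformers + peft loading path for an adapter like this one. The adapter repo id is a placeholder and the generation settings are illustrative, not taken from the README:

```python
# Minimal sketch: load the base model, then attach the LoRA adapter.
# "your-username/qwen3-4b-structured-lora" is a placeholder repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen3-4B-Instruct-2507"
adapter_id = "your-username/qwen3-4b-structured-lora"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)

# Ask for structured output, the task this adapter targets.
messages = [{"role": "user", "content": "List three primary colors as a JSON array."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```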
adapter_config.json CHANGED
@@ -20,7 +20,7 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 128,
+  "lora_alpha": 256,
   "lora_bias": false,
   "lora_dropout": 0.0,
   "megatron_config": null,
@@ -29,17 +29,17 @@
   "peft_type": "LORA",
   "peft_version": "0.18.1",
   "qalora_group_size": 16,
-  "r": 64,
+  "r": 128,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "o_proj",
+    "down_proj",
     "v_proj",
     "q_proj",
     "k_proj",
-    "o_proj",
-    "up_proj",
     "gate_proj",
-    "down_proj"
+    "up_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25225223aa24babfb904628e81854c31fc199b078b43a96aaaa3e98f5d6227de
-size 528550256
+oid sha256:6531b71c6aec7b63753ff9fa4941cf227ad6b267d05063c050b8009446cd1ebf
+size 1057033224
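
The LFS pointer shows the adapter roughly doubling in size (~529 MB to ~1.06 GB), which is what doubling the rank predicts: each adapted projection stores an r × d_in matrix A and a d_out × r matrix B, so the adapter parameter count scales linearly with r. A quick check of the ratio, using the sizes from the two pointers above:

```python
# LoRA parameter count scales linearly with rank r, so doubling
# r (64 -> 128) should roughly double the adapter file size.
old_size, new_size = 528_550_256, 1_057_033_224  # bytes, from the LFS pointers
print(new_size / old_size)  # ~2.0
```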