Rondall committed on
Commit bacbc3f · verified · 1 Parent(s): 19fc5c9

Upload LoRA adapter (README written by author)

README.md CHANGED
@@ -13,33 +13,40 @@ tags:
 - structured-output
 ---
 
-# daichira-structured-hard-sft-4k-output-lora
+<L4Cus-Qwen>
 
-This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**.
+This repository provides a **LoRA adapter** fine-tuned from
+**Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**.
 
-This repository contains **LoRA adapter weights only**. The base model must be loaded separately.
+This repository contains **LoRA adapter weights only**.
+The base model must be loaded separately.
 
 ## Training Objective
 
-This adapter is trained to improve **structured output accuracy** (JSON / YAML / XML / TOML / CSV).
-
-To focus specifically on extraction performance, **loss is applied only to the final assistant output**, while intermediate reasoning (Chain-of-Thought) is masked during the training process.
+This adapter is trained to improve **structured output accuracy**
+(JSON / YAML / XML / TOML / CSV).
+
+Loss is applied only to the final assistant output,
+while intermediate reasoning (Chain-of-Thought) is masked.
 
 ## Training Configuration
-- **Base model**: Qwen/Qwen3-4B-Instruct-2507
-- **Method**: QLoRA (4-bit)
-- **Max sequence length**: 1024
-- **Epochs**: 1
-- **Learning rate**: 2e-04
-- **LoRA Config**: r=32, alpha=64
+
+- Base model: Qwen/Qwen3-4B-Instruct-2507
+- Method: QLoRA (4-bit)
+- Max sequence length: 2048
+- Epochs: 1
+- Learning rate: 5e-05
+- LoRA: r=64, alpha=128
 
 ## Usage
+
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 import torch
 
 base = "Qwen/Qwen3-4B-Instruct-2507"
-adapter = "your_id/your-repo-name" # Please replace with your actual repository ID
+adapter = "your_id/your-repo"
 
 tokenizer = AutoTokenizer.from_pretrained(base)
 model = AutoModelForCausalLM.from_pretrained(
@@ -48,6 +55,7 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
 )
 model = PeftModel.from_pretrained(model, adapter)
+```
 
 ## Sources & Terms (IMPORTANT)
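The Training Objective above describes completion-only training: loss on the final assistant output, with intermediate Chain-of-Thought masked. The training script itself is not part of this commit, so the snippet below is only a minimal sketch of how that kind of masking is commonly done with the base tokenizer, by setting the label ids of the prompt/reasoning tokens to -100 (the ignore index of the cross-entropy loss in transformers). `build_example` and its inputs are illustrative names, not the author's code.

```python
# Minimal sketch (not the author's training code): keep loss only on the final
# assistant answer by masking everything else with -100, the label id that
# transformers' cross-entropy loss ignores.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")

def build_example(messages, final_answer, max_len=2048):
    # `messages` holds the conversation so far (including any reasoning turns);
    # `final_answer` is the structured output the adapter should learn to emit.
    prompt_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=True
    )
    answer_ids = tokenizer(final_answer, add_special_tokens=False)["input_ids"]
    answer_ids = answer_ids + [tokenizer.eos_token_id]

    input_ids = (prompt_ids + answer_ids)[:max_len]
    labels = ([-100] * len(prompt_ids) + answer_ids)[:max_len]  # loss on answer only
    return {"input_ids": input_ids, "labels": labels}

example = build_example(
    [{"role": "user", "content": "Extract the order as JSON: 2 apples, 1 pear"}],
    '{"items": [{"name": "apple", "qty": 2}, {"name": "pear", "qty": 1}]}',
)
```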
adapter_config.json CHANGED
@@ -20,25 +20,25 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 64,
+  "lora_alpha": 128,
   "lora_bias": false,
-  "lora_dropout": 0.05,
+  "lora_dropout": 0.0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
   "peft_version": "0.18.1",
   "qalora_group_size": 16,
-  "r": 32,
+  "r": 64,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
     "o_proj",
-    "gate_proj",
-    "v_proj",
     "down_proj",
+    "v_proj",
+    "q_proj",
+    "k_proj",
+    "gate_proj",
     "up_proj"
   ],
   "target_parameters": null,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c3a479c75760028cac38423cdf13326073ce109491f93cb65f9f732daa04b8f
-size 264308896
+oid sha256:1523efc2c35df90029e6588153e08fc6ce0be45de7f29cc08a6abe5b31b8eb77
+size 528550256
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+{
+  "</think>": 151668,
+  "</tool_call>": 151658,
+  "</tool_response>": 151666,
+  "<think>": 151667,
+  "<tool_call>": 151657,
+  "<tool_response>": 151665,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
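added_tokens.json pins the chat, tool-call, and <think> markers to fixed ids (151643–151668). The check below is illustrative, not part of the repo: it confirms the tokenizer shipped in this commit resolves each added token to the recorded id, assuming it is run from a local clone of the repository.

```python
# Illustrative consistency check between added_tokens.json and the tokenizer.
import json
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # local clone of this repo

with open("added_tokens.json") as f:
    added = json.load(f)

for token, expected_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token
```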
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|vision_pad|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:476870a1f2fb6f6a2759a6ede2383bf9d5d738f17844563b65c91965b722ae09
-size 11422924
+oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+size 11422654
tokenizer_config.json CHANGED
@@ -1,11 +1,236 @@
 {
+  "add_bos_token": false,
   "add_prefix_space": false,
-  "backend": "tokenizers",
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151665": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151666": {
+      "content": "</tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151667": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151668": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
   "bos_token": null,
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
-  "is_local": false,
+  "extra_special_tokens": {},
   "model_max_length": 262144,
   "pad_token": "<|vision_pad|>",
   "padding_side": "right",
vocab.json ADDED
The diff for this file is too large to render. See raw diff