naru0411 committed
Commit 1737abc · verified · 1 Parent(s): 1f2c4a5

Upload merged Qwen3-4B-Instruct-2507 model (auto-generated README)

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: Qwen/Qwen3-4B-Instruct-2507
+base_model: Qwen/Qwen2.5-7B-Instruct
 datasets:
 - alfworld_v5_filtered.jsonl
 language:
@@ -15,10 +15,10 @@ tags:
 - dbbench
 ---
 
-# qwen3-4b-agent-trajectory-lora
+# qwen2.5-7b-agent-trajectory-lora
 
 This repository provides a **LoRA adapter** fine-tuned from
-**Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**.
+**Qwen/Qwen2.5-7B-Instruct** using **LoRA + Unsloth**.
 
 This repository contains **LoRA adapter weights only**.
 The base model must be loaded separately.
@@ -41,12 +41,12 @@ To improve the reasoning efficiency and reduce the risk of infinite loops (repet
 
 ## Training Configuration
 
-- Base model: Qwen/Qwen3-4B-Instruct-2507
+- Base model: Qwen/Qwen2.5-7B-Instruct
 - Method: LoRA (full precision base)
 - Max sequence length: 2048
 - Epochs: 2
 - Learning rate: 2e-06
-- LoRA: r=128, alpha=256
+- LoRA: r=64, alpha=128
 
 ## Usage
 
@@ -55,7 +55,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 import torch
 
-base = "Qwen/Qwen3-4B-Instruct-2507"
+base = "Qwen/Qwen2.5-7B-Instruct"
 adapter = "your_id/your-repo"
 
 tokenizer = AutoTokenizer.from_pretrained(base)
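
Note: the Usage hunk above shows only the part of the README's example touched by this commit. A minimal end-to-end sketch consistent with the updated snippet (`your_id/your-repo` is the README's own placeholder; the user message is illustrative) would be:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

base = "Qwen/Qwen2.5-7B-Instruct"
adapter = "your_id/your-repo"  # README placeholder for this repository's id

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(
    base, torch_dtype=torch.bfloat16, device_map="auto"
)
# The README states the repo ships LoRA adapter weights only, so the adapter
# is attached on top of the separately loaded base model.
model = PeftModel.from_pretrained(model, adapter)

messages = [{"role": "user", "content": "You are in a kitchen. Find the mug."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```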
added_tokens.json CHANGED
@@ -1,10 +1,6 @@
 {
-  "</think>": 151668,
   "</tool_call>": 151658,
-  "</tool_response>": 151666,
-  "<think>": 151667,
   "<tool_call>": 151657,
-  "<tool_response>": 151665,
   "<|box_end|>": 151649,
   "<|box_start|>": 151648,
   "<|endoftext|>": 151643,
chat_template.jinja CHANGED
@@ -1,55 +1,48 @@
 {%- if tools %}
     {{- '<|im_start|>system\n' }}
-    {%- if messages[0].role == 'system' %}
-        {{- messages[0].content + '\n\n' }}
+    {%- if messages[0]['role'] == 'system' %}
+        {{- messages[0]['content'] }}
+    {%- else %}
+        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
     {%- endif %}
-    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
     {%- for tool in tools %}
         {{- "\n" }}
         {{- tool | tojson }}
     {%- endfor %}
     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
 {%- else %}
-    {%- if messages[0].role == 'system' %}
-        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+    {%- if messages[0]['role'] == 'system' %}
+        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+    {%- else %}
+        {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
     {%- endif %}
 {%- endif %}
 {%- for message in messages %}
-    {%- if message.content is string %}
-        {%- set content = message.content %}
-    {%- else %}
-        {%- set content = '' %}
-    {%- endif %}
-    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
-        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
     {%- elif message.role == "assistant" %}
-        {{- '<|im_start|>' + message.role + '\n' + content }}
-        {%- if message.tool_calls %}
-            {%- for tool_call in message.tool_calls %}
-                {%- if (loop.first and content) or (not loop.first) %}
-                    {{- '\n' }}
-                {%- endif %}
-                {%- if tool_call.function %}
-                    {%- set tool_call = tool_call.function %}
-                {%- endif %}
-                {{- '<tool_call>\n{"name": "' }}
-                {{- tool_call.name }}
-                {{- '", "arguments": ' }}
-                {%- if tool_call.arguments is string %}
-                    {{- tool_call.arguments }}
-                {%- else %}
-                    {{- tool_call.arguments | tojson }}
-                {%- endif %}
-                {{- '}\n</tool_call>' }}
-            {%- endfor %}
+        {{- '<|im_start|>' + message.role }}
+        {%- if message.content %}
+            {{- '\n' + message.content }}
         {%- endif %}
+        {%- for tool_call in message.tool_calls %}
+            {%- if tool_call.function is defined %}
+                {%- set tool_call = tool_call.function %}
+            {%- endif %}
+            {{- '\n<tool_call>\n{"name": "' }}
+            {{- tool_call.name }}
+            {{- '", "arguments": ' }}
+            {{- tool_call.arguments | tojson }}
+            {{- '}\n</tool_call>' }}
+        {%- endfor %}
         {{- '<|im_end|>\n' }}
     {%- elif message.role == "tool" %}
-        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
         {%- endif %}
         {{- '\n<tool_response>\n' }}
-        {{- content }}
+        {{- message.content }}
         {{- '\n</tool_response>' }}
         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
             {{- '<|im_end|>\n' }}
@@ -58,4 +51,4 @@
 {%- endfor %}
 {%- if add_generation_prompt %}
     {{- '<|im_start|>assistant\n' }}
-{%- endif %}
+{%- endif %}
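
The new template is the stock Qwen2.5 chat template: it injects a default system prompt when none is given, renders `message.tool_calls` inside `<tool_call>` tags, and wraps tool responses in a user turn with `<tool_response>` tags. A small sketch of how it renders a tool-calling exchange (the tool name and payload are illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    # Assistant turn carrying a tool call; the template emits it as a
    # <tool_call> JSON block after <|im_start|>assistant.
    {"role": "assistant", "content": "", "tool_calls": [
        {"function": {"name": "get_weather", "arguments": {"city": "Paris"}}},
    ]},
    # Tool turn; the template wraps it as a user message in <tool_response>.
    {"role": "tool", "content": "{\"temp_c\": 18}"},
]
print(tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False))
```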
config.json CHANGED
@@ -1,17 +1,15 @@
 {
   "architectures": [
-    "Qwen3ForCausalLM"
+    "Qwen2ForCausalLM"
   ],
-  "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 151643,
   "dtype": "bfloat16",
   "eos_token_id": 151645,
-  "head_dim": 128,
   "hidden_act": "silu",
-  "hidden_size": 2560,
+  "hidden_size": 3584,
   "initializer_range": 0.02,
-  "intermediate_size": 9728,
+  "intermediate_size": 18944,
   "layer_types": [
     "full_attention",
     "full_attention",
@@ -40,31 +38,35 @@
     "full_attention",
     "full_attention",
     "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
-    "full_attention",
     "full_attention"
   ],
-  "max_position_embeddings": 262144,
-  "max_window_layers": 36,
-  "model_type": "qwen3",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 36,
-  "num_key_value_heads": 8,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
   "pad_token_id": 151643,
+  "quantization_config": {
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
-  "rope_theta": 5000000,
+  "rope_theta": 1000000.0,
   "sliding_window": null,
-  "tie_word_embeddings": true,
+  "tie_word_embeddings": false,
   "transformers_version": "4.56.2",
   "unsloth_version": "2025.12.7",
   "use_cache": true,
   "use_sliding_window": false,
-  "vocab_size": 151936
+  "vocab_size": 152064
 }
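
The added `quantization_config` block means the uploaded shards are stored as bitsandbytes NF4 4-bit weights. Since transformers reads this block straight from config.json, loading should need no extra flags beyond having bitsandbytes installed; a minimal sketch (the repo id below is a placeholder):

```python
from transformers import AutoModelForCausalLM

# quantization_config is embedded in config.json, so from_pretrained loads
# the merged weights as bitsandbytes NF4 4-bit automatically
# (requires the bitsandbytes package and a CUDA device).
model = AutoModelForCausalLM.from_pretrained(
    "your_id/your-repo",  # placeholder for this repository's id
    device_map="auto",
)
print(model.config.quantization_config)
```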
generation_config.json CHANGED
@@ -5,8 +5,9 @@
     151645,
     151643
   ],
-  "max_length": 262144,
+  "max_length": 32768,
   "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cefd4badaab8bcee450265fe24d75db0aeeed662f4a376ba9aa537a1fdde6c8d
-size 4967215360
+oid sha256:6a1f28277ef98e7ea657e3eb1ffc98406f803377acc3da66c202f6d4eb1c07d3
+size 4457259586
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8efe05035e4bf3bf6094ab92b81bfa6e29d7b8ba936fcffcd0b7b3e74a6fc8d9
-size 3077766632
+oid sha256:06006972c3be88e8a44fe21cfe2b0472b130780c781a741f8f90f1fe5ba3aae2
+size 1089994880
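
These pointer files record the SHA-256 oid and byte size of each Git LFS object; the smaller sizes are consistent with the 4-bit storage noted in config.json. One way to verify a downloaded shard against its pointer, a minimal sketch using only the standard library:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a large file through SHA-256 without loading it whole."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the new pointer for shard 1 of 2 above.
expected = "6a1f28277ef98e7ea657e3eb1ffc98406f803377acc3da66c202f6d4eb1c07d3"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```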
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
-size 11422654
+oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+size 11421896
tokenizer_config.json CHANGED
@@ -177,38 +177,6 @@
       "rstrip": false,
       "single_word": false,
       "special": false
-    },
-    "151665": {
-      "content": "<tool_response>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "151666": {
-      "content": "</tool_response>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "151667": {
-      "content": "<think>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "151668": {
-      "content": "</think>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
     }
   },
   "additional_special_tokens": [
@@ -231,7 +199,7 @@
   "eos_token": "<|im_end|>",
   "errors": "replace",
   "extra_special_tokens": {},
-  "model_max_length": 262144,
+  "model_max_length": 32768,
   "pad_token": "<|endoftext|>",
   "padding_side": "right",
   "split_special_tokens": false,