Training in progress, step 500
- adapter_config.json +46 -0
- adapter_model.safetensors +3 -0
- config.json +8 -3
- model.safetensors +1 -1
- training_args.bin +1 -1
adapter_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "alora_invocation_tokens": null,
+  "alpha_pattern": {},
+  "arrow_config": null,
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+  "bias": "none",
+  "corda_config": null,
+  "ensure_weight_tying": false,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 128,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "peft_version": "0.18.0",
+  "qalora_group_size": 16,
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "k_proj",
+    "up_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
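For context, this adapter_config.json is what peft writes when saving a LoRA adapter. A minimal sketch of the LoraConfig that would serialize to the values above; the training script is not part of this commit, so this reconstruction is an assumption based only on the saved fields:

```python
from peft import LoraConfig

# Reconstructed from the adapter_config.json above (assumption: the actual
# training script is not included in this commit).
lora_config = LoraConfig(
    r=64,                 # LoRA rank ("r" in the saved config)
    lora_alpha=128,       # with use_rslora=False the effective scale is alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```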
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45f81fc01229bdb45b7841fc44354325ba01f23bb0906129f38f5aaa70f27bc6
+size 161533160
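adapter_model.safetensors is checked in as a Git LFS pointer: the oid and size identify the actual adapter weights (about 161 MB) stored in LFS, not the file contents themselves. A minimal sketch of loading the adapter onto its base model with peft, assuming a hypothetical repo id, since the commit itself does not name the repository:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

ADAPTER_ID = "your-username/your-adapter-repo"  # hypothetical; not named in the commit

# The base model comes from base_model_name_or_path in adapter_config.json.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, ADAPTER_ID)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")

model.eval()
```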
config.json
CHANGED
@@ -161,12 +161,17 @@
   "label_smoothing": 0.0,
   "length_penalty": 1.0,
   "llm_dim": 1024,
-  "lora_alpha":
-  "lora_dropout": 0.
+  "lora_alpha": 128,
+  "lora_dropout": 0.05,
   "lora_rank": 64,
   "lora_target_modules": [
     "q_proj",
-    "
+    "k_proj",
+    "v_proj",
+    "o_proj",
+    "gate_proj",
+    "up_proj",
+    "down_proj"
   ],
   "mask_feature_length": 10,
   "mask_feature_min_masks": 0,
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e47c1dad96a0b9e1a85a0a0545ce0fda86d3a8a26bccf2306db9e3de0d48abcc
 size 25172384
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:377c38bfe13b6360191d15c7738a527800ce4460e7b8ec2bd2ed801919793c27
 size 5201
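training_args.bin is the pickled transformers.TrainingArguments object that the Trainer saves alongside each checkpoint push. A sketch of inspecting it, assuming the file has been downloaded locally; passing weights_only=False is an assumption needed on recent torch versions, which refuse to unpickle arbitrary classes by default:

```python
import torch

# training_args.bin holds a pickled transformers.TrainingArguments;
# weights_only=False (assumption for torch >= 2.6) allows unpickling it.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```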