upload lora
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
- qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
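Each checkpoint directory in the list above is a standard PEFT LoRA adapter (an `adapter_config.json` plus an `adapter_model.safetensors`), so any of them can be attached to the base model with `peft`. A minimal sketch, assuming the adapter directory has been downloaded locally under this repo's `qwen-3b-lora/...` layout, and that the public `Qwen/Qwen2.5-3B-Instruct` checkpoint stands in for the local verl-cache path recorded in `base_model_name_or_path`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Public base model; the adapters were trained against a local verl cache
# of this checkpoint (see base_model_name_or_path in adapter_config.json).
base_id = "Qwen/Qwen2.5-3B-Instruct"

# Example local path to one of the uploaded adapters (adjust as needed).
adapter_dir = (
    "qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/"
    "global_step_512/actor/lora_adapter"
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")
model = PeftModel.from_pretrained(model, adapter_dir)  # attach the LoRA weights
model = model.merge_and_unload()  # optional: fold the adapter into the base weights
```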
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+    "task_type": "CAUSAL_LM",
+    "peft_type": "LORA",
+    "auto_mapping": null,
+    "peft_version": "0.18.1",
+    "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+    "revision": null,
+    "inference_mode": false,
+    "r": 16,
+    "target_modules": [
+        "down_proj",
+        "up_proj",
+        "q_proj",
+        "o_proj",
+        "gate_proj",
+        "k_proj",
+        "v_proj"
+    ],
+    "exclude_modules": null,
+    "lora_alpha": 32,
+    "lora_dropout": 0.0,
+    "fan_in_fan_out": false,
+    "bias": "none",
+    "use_rslora": false,
+    "modules_to_save": null,
+    "init_lora_weights": true,
+    "layers_to_transform": null,
+    "layers_pattern": null,
+    "rank_pattern": {},
+    "alpha_pattern": {},
+    "megatron_config": null,
+    "megatron_core": "megatron.core",
+    "trainable_token_indices": null,
+    "loftq_config": {},
+    "eva_config": null,
+    "corda_config": null,
+    "use_dora": false,
+    "alora_invocation_tokens": null,
+    "use_qalora": false,
+    "qalora_group_size": 16,
+    "layer_replication": null,
+    "runtime_config": {
+        "ephemeral_gpu_offload": false
+    },
+    "lora_bias": false,
+    "target_parameters": null,
+    "arrow_config": null,
+    "ensure_weight_tying": false
+}
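This config adapts all seven linear projections with r = 16 and lora_alpha = 32, so the effective scaling alpha / r is 2. The adapter file sizes below track the rank linearly (119,801,496 bytes at r=16, 239,536,248 at r=32, 479,005,032 at r=64), consistent with fp32 LoRA weights. A back-of-the-envelope check, assuming the published Qwen2.5-3B dimensions (hidden 2048, 36 layers, 16 query heads and 2 KV heads of head dim 128, MLP intermediate 11008); these dimensions are an assumption, not taken from this upload:

```python
# Estimate the rank-16 adapter's parameter count and fp32 payload.
r = 16
hidden, layers, kv_dim, inter = 2048, 36, 256, 11008  # assumed Qwen2.5-3B dims

# A LoRA pair for a d_in x d_out linear layer adds r * (d_in + d_out) params.
shapes = {
    "q_proj": (hidden, hidden),
    "k_proj": (hidden, kv_dim),
    "v_proj": (hidden, kv_dim),
    "o_proj": (hidden, hidden),
    "gate_proj": (hidden, inter),
    "up_proj": (hidden, inter),
    "down_proj": (inter, hidden),
}
params = layers * sum(r * (din + dout) for din, dout in shapes.values())
print(params)      # 29_933_568 trainable parameters
print(params * 4)  # ~119.7 MB in fp32, matching the 119801496-byte file
                   # below up to safetensors header metadata
```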
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f822a3c589425866b8e43570095c11e50db20fcba48e4a5a0e7a235b7c40527
+size 119801496
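The `.safetensors` entries in this diff are Git LFS pointer files (version, sha256 oid, byte size), not the tensors themselves. A small sketch verifying a downloaded object against its pointer, using only the standard library:

```python
import hashlib
import os

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded LFS object against the sha256 oid and size in its pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values from the global_step_128 pointer above.
ok = verify_lfs_pointer(
    "adapter_model.safetensors",
    "4f822a3c589425866b8e43570095c11e50db20fcba48e4a5a0e7a235b7c40527",
    119801496,
)
print(ok)
```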
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f552989eece9f876e5767a916e82da6f6d8037a2cfe1276ea0c5b2d59a6b4977
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:209855544cc22542ddf222d1ca8f421bf107ceaca61e678191a99b65d5fff3cf
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e5953d8c176e4bbf04085616f43e7ee8872553e393d7e6281779c2b7cf6fbca
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb67622d2db0eda89f89c15d5107d6f79b83697c5714e2866ca481986e029ecf
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b58e383eabfdf47630c4e95c903b9929efe156acebc80b668b5bf296040cad9
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00bec09157f2ee7dbce109867a22419be66fd7a570c9ae7df6349338632173e7
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-16 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecbe1f57a24c8880112a4f30c52682930041296406739a8ef3875b673dce9320
+size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+    "task_type": "CAUSAL_LM",
+    "peft_type": "LORA",
+    "auto_mapping": null,
+    "peft_version": "0.18.1",
+    "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+    "revision": null,
+    "inference_mode": false,
+    "r": 32,
+    "target_modules": [
+        "gate_proj",
+        "down_proj",
+        "q_proj",
+        "k_proj",
+        "up_proj",
+        "o_proj",
+        "v_proj"
+    ],
+    "exclude_modules": null,
+    "lora_alpha": 64,
+    "lora_dropout": 0.0,
+    "fan_in_fan_out": false,
+    "bias": "none",
+    "use_rslora": false,
+    "modules_to_save": null,
+    "init_lora_weights": true,
+    "layers_to_transform": null,
+    "layers_pattern": null,
+    "rank_pattern": {},
+    "alpha_pattern": {},
+    "megatron_config": null,
+    "megatron_core": "megatron.core",
+    "trainable_token_indices": null,
+    "loftq_config": {},
+    "eva_config": null,
+    "corda_config": null,
+    "use_dora": false,
+    "alora_invocation_tokens": null,
+    "use_qalora": false,
+    "qalora_group_size": 16,
+    "layer_replication": null,
+    "runtime_config": {
+        "ephemeral_gpu_offload": false
+    },
+    "lora_bias": false,
+    "target_parameters": null,
+    "arrow_config": null,
+    "ensure_weight_tying": false
+}
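Note that across the three rank sweeps shown in this diff, lora_alpha is held at 2r (32/16, 64/32, 128/64), so every run applies the same effective LoRA scaling; with `use_rslora` false, PEFT scales the update by alpha / r rather than alpha / sqrt(r). A trivial check:

```python
# Effective LoRA scaling per rank sweep: constant at 2.0, since use_rslora
# is false and peft therefore uses lora_alpha / r as the scaling factor.
for r, alpha in [(16, 32), (32, 64), (64, 128)]:
    print(r, alpha / r)  # -> 2.0 for every sweep
```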
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8679bb391f69a1a137426889ecd2a592d673ce4461fab94ca19af74e9bf64c39
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:665c58e6c5367d50ceef9e419ece12469ead9c6b5944d84195ee11d5e7ee99cc
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b567bbd1225cee5dcfbc1e8de6f541011d67f6d3f729d2dbd98b5661c5f08da6
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e941eec492fefda234a9da89b908c7fda18ba9ae3b5108e29cb723abf7e191a
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7118e7a2f7010c18b62ce0f6bd8ed7e9657373e5a3728cb825feedd79e1b4b2
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:620bd7650389babaf6e5076e1c2675f6c95d9138d9c317a2b91215bc978ea65b
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d1378877f1380822865cd69e3aa520b4653e57f2744e60a57be5ae86a627a6c
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-32 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9507ee9b3ca871711aea06b38f44e40fbac7392133a6a2222ede283f4c1376f0
+size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+    "task_type": "CAUSAL_LM",
+    "peft_type": "LORA",
+    "auto_mapping": null,
+    "peft_version": "0.18.1",
+    "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+    "revision": null,
+    "inference_mode": false,
+    "r": 64,
+    "target_modules": [
+        "gate_proj",
+        "k_proj",
+        "v_proj",
+        "down_proj",
+        "q_proj",
+        "up_proj",
+        "o_proj"
+    ],
+    "exclude_modules": null,
+    "lora_alpha": 128,
+    "lora_dropout": 0.0,
+    "fan_in_fan_out": false,
+    "bias": "none",
+    "use_rslora": false,
+    "modules_to_save": null,
+    "init_lora_weights": true,
+    "layers_to_transform": null,
+    "layers_pattern": null,
+    "rank_pattern": {},
+    "alpha_pattern": {},
+    "megatron_config": null,
+    "megatron_core": "megatron.core",
+    "trainable_token_indices": null,
+    "loftq_config": {},
+    "eva_config": null,
+    "corda_config": null,
+    "use_dora": false,
+    "alora_invocation_tokens": null,
+    "use_qalora": false,
+    "qalora_group_size": 16,
+    "layer_replication": null,
+    "runtime_config": {
+        "ephemeral_gpu_offload": false
+    },
+    "lora_bias": false,
+    "target_parameters": null,
+    "arrow_config": null,
+    "ensure_weight_tying": false
+}
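Since the upload is a grid of 50 files, pulling only one rank sweep rather than the whole repo keeps the download small. A sketch using `huggingface_hub`; the repo id below is a placeholder, since the diff view does not name the repository:

```python
from huggingface_hub import snapshot_download

# Hypothetical repo id -- substitute the actual repository name.
local_dir = snapshot_download(
    repo_id="your-org/qwen-3b-lora",
    allow_patterns=["*GRPO-LoRA-rank64*"],  # fetch only the rank-64 checkpoints
)
print(local_dir)
```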
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d560e44898365c51a026af8774bed301c94201088a500971f98b8c9701e91bd
+size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-64 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5950026a5e2bdcbd20f764182ddf53aafbb9a9a38d50daf6b92fe5f41210828f
+size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-64 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29d3dd11e2f379d49325d4cfb88c9ce50c42ee3c2857c070e7d1b363fd59666d
+size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-64 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c77eeb1471d65ac214f4ec52ed3343ca093145712ffe4fbf946694dc7fac838
+size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-64 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c7f5a8736495fca81a8121790f780718aade8683e965c1911c8dfe0a2ada6b4
+size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
[identical to the rank-64 adapter_config.json shown for global_step_128 above]
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f19a4b8876b799ba6d699d3ea4ad887c57995f5c08c89590b6f966eeaae07e4
+size 479005032

qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+  "task_type": "CAUSAL_LM",
+  "peft_type": "LORA",
+  "auto_mapping": null,
+  "peft_version": "0.18.1",
+  "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+  "revision": null,
+  "inference_mode": false,
+  "r": 64,
+  "target_modules": [
+    "gate_proj",
+    "k_proj",
+    "v_proj",
+    "down_proj",
+    "q_proj",
+    "up_proj",
+    "o_proj"
+  ],
+  "exclude_modules": null,
+  "lora_alpha": 128,
+  "lora_dropout": 0.0,
+  "fan_in_fan_out": false,
+  "bias": "none",
+  "use_rslora": false,
+  "modules_to_save": null,
+  "init_lora_weights": true,
+  "layers_to_transform": null,
+  "layers_pattern": null,
+  "rank_pattern": {},
+  "alpha_pattern": {},
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "trainable_token_indices": null,
+  "loftq_config": {},
+  "eva_config": null,
+  "corda_config": null,
+  "use_dora": false,
+  "alora_invocation_tokens": null,
+  "use_qalora": false,
+  "qalora_group_size": 16,
+  "layer_replication": null,
+  "runtime_config": {
+    "ephemeral_gpu_offload": false
+  },
+  "lora_bias": false,
+  "target_parameters": null,
+  "arrow_config": null,
+  "ensure_weight_tying": false
+}

qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59d0459898f25508e73b421d187806d3ec076d4b321d7c31e54c7074bc3d6754
+size 479005032

qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+  "task_type": "CAUSAL_LM",
+  "peft_type": "LORA",
+  "auto_mapping": null,
+  "peft_version": "0.18.1",
+  "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+  "revision": null,
+  "inference_mode": false,
+  "r": 64,
+  "target_modules": [
+    "gate_proj",
+    "k_proj",
+    "v_proj",
+    "down_proj",
+    "q_proj",
+    "up_proj",
+    "o_proj"
+  ],
+  "exclude_modules": null,
+  "lora_alpha": 128,
+  "lora_dropout": 0.0,
+  "fan_in_fan_out": false,
+  "bias": "none",
+  "use_rslora": false,
+  "modules_to_save": null,
+  "init_lora_weights": true,
+  "layers_to_transform": null,
+  "layers_pattern": null,
+  "rank_pattern": {},
+  "alpha_pattern": {},
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "trainable_token_indices": null,
+  "loftq_config": {},
+  "eva_config": null,
+  "corda_config": null,
+  "use_dora": false,
+  "alora_invocation_tokens": null,
+  "use_qalora": false,
+  "qalora_group_size": 16,
+  "layer_replication": null,
+  "runtime_config": {
+    "ephemeral_gpu_offload": false
+  },
+  "lora_bias": false,
+  "target_parameters": null,
+  "arrow_config": null,
+  "ensure_weight_tying": false
+}

qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:066386917aeb9d85b6963d30b3100a8d62dcb4b49fd9ab2d4cc3bd12ff5bb2b6
+size 479005032

qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+  "task_type": "CAUSAL_LM",
+  "peft_type": "LORA",
+  "auto_mapping": null,
+  "peft_version": "0.18.1",
+  "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+  "revision": null,
+  "inference_mode": false,
+  "r": 8,
+  "target_modules": [
+    "down_proj",
+    "gate_proj",
+    "k_proj",
+    "up_proj",
+    "q_proj",
+    "o_proj",
+    "v_proj"
+  ],
+  "exclude_modules": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "fan_in_fan_out": false,
+  "bias": "none",
+  "use_rslora": false,
+  "modules_to_save": null,
+  "init_lora_weights": true,
+  "layers_to_transform": null,
+  "layers_pattern": null,
+  "rank_pattern": {},
+  "alpha_pattern": {},
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "trainable_token_indices": null,
+  "loftq_config": {},
+  "eva_config": null,
+  "corda_config": null,
+  "use_dora": false,
+  "alora_invocation_tokens": null,
+  "use_qalora": false,
+  "qalora_group_size": 16,
+  "layer_replication": null,
+  "runtime_config": {
+    "ephemeral_gpu_offload": false
+  },
+  "lora_bias": false,
+  "target_parameters": null,
+  "arrow_config": null,
+  "ensure_weight_tying": false
+}

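One pattern worth noting across the sweep: every config pairs lora_alpha with twice the rank (16/8 here, 128/64 for the rank-64 runs) and leaves use_rslora false, so the effective scale applied to the low-rank update is held constant across ranks:

    \Delta W = \frac{\alpha}{r} \, B A, \qquad \frac{\alpha}{r} = \frac{16}{8} = \frac{128}{64} = 2
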
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-math-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14e2e6ad8893cb2eea88c870d5e2c36f89a7adc6ce069cb458a155f8723cafa4
+size 59933600
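
The pointer sizes are consistent with what the configs predict. Assuming the published Qwen2.5-3B geometry (36 layers, hidden size 2048, GQA with 2 key/value heads of head_dim 128, MLP width 11008) and fp32 adapter storage, a back-of-the-envelope check (the dims and arithmetic are my assumptions, not stated in the repo):

    HIDDEN, KV_OUT, MLP, LAYERS = 2048, 256, 11008, 36  # assumed Qwen2.5-3B geometry

    def lora_params(r: int) -> int:
        # Each targeted projection contributes r * (d_in + d_out) parameters (matrices A and B).
        per_layer = r * (
            (HIDDEN + HIDDEN)      # q_proj
            + (HIDDEN + KV_OUT)    # k_proj
            + (HIDDEN + KV_OUT)    # v_proj
            + (HIDDEN + HIDDEN)    # o_proj
            + 3 * (HIDDEN + MLP)   # gate_proj, up_proj, down_proj
        )
        return per_layer * LAYERS

    for r in (8, 64):
        print(r, lora_params(r), 4 * lora_params(r))  # parameter count, fp32 bytes
    # r=8:  ~15.0M params -> 59,867,136 bytes; r=64: ~119.7M params -> 478,937,088 bytes,
    # matching the 59933600- and 479005032-byte pointers up to safetensors header overhead.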