DatPySci committed
Commit 1e57f69 · verified · 1 Parent(s): 6eb2cb0

upload lora

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.

Files changed (50)
  1. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  2. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
  3. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
  4. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
  5. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
  6. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
  7. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
  8. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
  9. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
  10. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
  11. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
  12. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
  13. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
  14. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
  15. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
  16. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
  17. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  18. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
  19. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
  20. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
  21. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
  22. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
  23. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
  24. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
  25. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
  26. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
  27. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
  28. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
  29. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
  30. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
  31. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
  32. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
  33. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  34. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
  35. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_config.json +49 -0
  36. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_model.safetensors +3 -0
  37. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_config.json +49 -0
  38. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_model.safetensors +3 -0
  39. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_config.json +49 -0
  40. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_model.safetensors +3 -0
  41. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_config.json +49 -0
  42. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_model.safetensors +3 -0
  43. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_config.json +49 -0
  44. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_model.safetensors +3 -0
  45. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_config.json +49 -0
  46. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_model.safetensors +3 -0
  47. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_config.json +49 -0
  48. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_model.safetensors +3 -0
  49. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_config.json +49 -0
  50. qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_model.safetensors +3 -0
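All 50 files are PEFT LoRA checkpoints of the GRPO actor: one adapter_config.json plus one adapter_model.safetensors per rank (8, 16, 32, 64) and per global step (64 through 512). A minimal loading sketch with transformers and peft, assuming a local clone of this repo; note the base_model_name_or_path baked into the configs is a machine-local verl cache path, so the public Qwen/Qwen2.5-3B-Instruct checkpoint is substituted for it here:

```python
# Minimal sketch: load one GRPO-trained LoRA checkpoint on top of the base model.
# Assumes `transformers` and `peft` are installed and this repo is cloned locally;
# the adapter path below is one of the checkpoints listed in this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "Qwen/Qwen2.5-3B-Instruct"  # public base; the configs point at a local verl cache
ADAPTER = ("qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/"
           "global_step_512/actor/lora_adapter")

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype="auto")
model = PeftModel.from_pretrained(model, ADAPTER)  # reads adapter_config.json + safetensors
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```

Calling merge_and_unload() is optional; keeping the adapter separate lets the same base weights serve all four ranks.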
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "down_proj",
+     "gate_proj",
+     "q_proj",
+     "up_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f1fb99524d62fb79af445666801a84e91e21c6b661b2dec1cffb3e8b622c19c
+ size 119801496
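As the hunk above shows, each adapter_model.safetensors is stored as a three-line Git LFS pointer (spec version, sha256 oid, byte size), not the weights themselves. A sketch of fetching one such file through huggingface_hub; the repo_id below is a hypothetical placeholder, since the commit view does not name the repository:

```python
# Resolve one LFS-backed adapter file to a local path via the Hub API.
# Assumes `huggingface_hub` is installed; repo_id is a placeholder to substitute.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<user>/<repo>",  # hypothetical: the actual Hub repo id of this commit
    filename="qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/"
             "global_step_128/actor/lora_adapter/adapter_model.safetensors",
)
print(path)  # local cache path of the resolved 119,801,496-byte file
```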
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "down_proj",
+     "gate_proj",
+     "q_proj",
+     "up_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_192/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a451728cf477be4a4e15b4fe2d64302710961042989101c16480abfe62b8c26
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "gate_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_256/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbfd16f689eb5119e11269f253dc9eb1504a96f73091e1d6932d8c462108a762
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "gate_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_320/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d50ef6d9c84b34bdda22be8f3d0989baa73604480b42c2e1bbc74da24af7632
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "gate_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_384/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a64b26eec62e2b067f83546607c803010d245e51fab76a260968b944ef1ea14e
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "gate_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_448/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d768ffde3ce3412b87033edf42df9896bf5a964c16885a1c9ab6eb6a74375123
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "up_proj",
+     "k_proj",
+     "v_proj",
+     "gate_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_512/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fcbae757ee3552da3cdc0e7775969069cd616937d3d9413bfeec09be238ba52
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 16,
+   "target_modules": [
+     "k_proj",
+     "o_proj",
+     "down_proj",
+     "gate_proj",
+     "q_proj",
+     "up_proj",
+     "v_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank16/global_step_64/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2578414694c142126fdba73a11718d68178ca5b6f4f6dbb851a58e561f7eaca7
+ size 119801496
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fe6185fe4d2ee1e35ffacfd2e07c7a58583bfd4d83e8f31d09cb046b165a3a7
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_192/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecabed98da06e091980486ca97ffd64bcefca86da6da62c84d6fd89ce91e29be
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_256/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c697eea35c14797c8141135eb4d6dbb3baf5f6be92ac69134e7bce9874e01deb
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_320/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:278824427889e589d4fecd55b33fb38189408f865295f8e2649ca8d2d97b99f1
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_384/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:497e9f4fb588330bde941e871067729c531f1f19b99b622ef6ee7f1fadcbfbc6
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_448/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:776e22339116930f3018a2f3b8fb3c2697ef89123100f610aaaeecdcf42a291d
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_512/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52afade49505cacb116c3c7c814606d6775ef04d16cb73588036b135f4221c0b
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 32,
+   "target_modules": [
+     "k_proj",
+     "up_proj",
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank32/global_step_64/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2a7cdcd365628213d2cfa6afbdbaaa5b34e1c55c4a5151c57bb13aa64856509
+ size 239536248
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7b9b3c8427ecaff63b4a3b32591942eb14858e5c67f3255a7a9e6acbb3800fd
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_192/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf347ef2c78a2814e2ee63f9822ded3cccfa0c9aaf3c1334705c5fe54f255ccf
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_256/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13a6e06cf3d61d65e5e63e48a105986493761c23d3906e901f79836b4bd85faa
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_320/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fef5d4bfd5ed243c1ac6ae94430c697b6d0e5cba2f589f132720fb3afd8b8d19
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_384/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcf380ab14b47f94e7bed10897afba3e9e3e87fe7081aae6839d2d4ae939e008
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_448/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b63300a374fa4a7f0f060c811c0f4ea0671f39a97b985f27d058115353531bae
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_512/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78e0bb7767cbde6789b99110af8b00a98c6bdd147d23e9f31e13bb078519dc78
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 64,
+   "target_modules": [
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj",
+     "q_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 128,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank64/global_step_64/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d71fd9a27da282dd2122f197317a221938aaa9f391f506020a8daa0408722ff
+ size 479005032
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "task_type": "CAUSAL_LM",
+   "peft_type": "LORA",
+   "auto_mapping": null,
+   "peft_version": "0.18.1",
+   "base_model_name_or_path": "/dev/shm/verl-cache/b3c28c1b99a08b84eb28d5733b49c01c/aa8e72537993ba99e69dfaafa59ed015b17504d1",
+   "revision": null,
+   "inference_mode": false,
+   "r": 8,
+   "target_modules": [
+     "up_proj",
+     "k_proj",
+     "gate_proj",
+     "o_proj",
+     "q_proj",
+     "v_proj",
+     "down_proj"
+   ],
+   "exclude_modules": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "fan_in_fan_out": false,
+   "bias": "none",
+   "use_rslora": false,
+   "modules_to_save": null,
+   "init_lora_weights": true,
+   "layers_to_transform": null,
+   "layers_pattern": null,
+   "rank_pattern": {},
+   "alpha_pattern": {},
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "trainable_token_indices": null,
+   "loftq_config": {},
+   "eva_config": null,
+   "corda_config": null,
+   "use_dora": false,
+   "alora_invocation_tokens": null,
+   "use_qalora": false,
+   "qalora_group_size": 16,
+   "layer_replication": null,
+   "runtime_config": {
+     "ephemeral_gpu_offload": false
+   },
+   "lora_bias": false,
+   "target_parameters": null,
+   "arrow_config": null,
+   "ensure_weight_tying": false
+ }
qwen-3b-lora/Qwen/Qwen2.5-3B-Instruct-polaris-GRPO-LoRA-rank8/global_step_128/actor/lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2baeef11332894cb3bc6cc15b91c60c0e3581dad844a84a0ed9befe37740788
+ size 59933600
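Across the four configurations, every config keeps lora_alpha = 2r and targets the same seven projections, and the adapter size grows linearly with rank: 59,933,600 bytes at rank 8, 119,801,496 at rank 16, 239,536,248 at rank 32, and 479,005,032 at rank 64. That matches LoRA's r*(d_in + d_out) parameters per targeted projection. A back-of-the-envelope check, assuming the published Qwen2.5-3B dimensions (36 layers, hidden size 2048, KV projection width 256, MLP width 11008) and fp32 storage; the roughly 67 KB residual per file would be the safetensors header:

```python
# Sanity-check sketch: expected LoRA parameter counts vs. the LFS file sizes above.
# The model dimensions are assumptions taken from the public Qwen2.5-3B config.
HIDDEN, KV, MLP, LAYERS = 2048, 256, 11008, 36

def lora_params(r: int) -> int:
    """LoRA adds r*(d_in + d_out) params per targeted linear layer."""
    per_layer = (
        r * (HIDDEN + HIDDEN)      # q_proj
        + r * (HIDDEN + KV)        # k_proj
        + r * (HIDDEN + KV)        # v_proj
        + r * (HIDDEN + HIDDEN)    # o_proj
        + 2 * r * (HIDDEN + MLP)   # gate_proj, up_proj
        + r * (MLP + HIDDEN)       # down_proj
    )
    return LAYERS * per_layer

for r, size in [(8, 59933600), (16, 119801496), (32, 239536248), (64, 479005032)]:
    params = lora_params(r)
    # fp32 weights (4 bytes each) plus a small safetensors header account for each size;
    # the residual comes out to roughly 66-68 KB per file.
    print(r, params, 4 * params, size - 4 * params)
```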