skar0 committed
Commit 745950e · verified · 1 Parent(s): d9a44b7

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. adapter_config.json +46 -0
  2. adapter_model.bin +3 -0
  3. chat_template.jinja +109 -0
  4. final/adapter_config.json +46 -0
  5. final/adapter_model.bin +3 -0
  6. special_tokens_map.json +30 -0
  7. step_0/adapter_config.json +46 -0
  8. step_0/adapter_model.bin +3 -0
  9. step_1000/adapter_config.json +46 -0
  10. step_1000/adapter_model.bin +3 -0
  11. step_1008/adapter_config.json +46 -0
  12. step_1008/adapter_model.bin +3 -0
  13. step_1016/adapter_config.json +46 -0
  14. step_1016/adapter_model.bin +3 -0
  15. step_1024/adapter_config.json +46 -0
  16. step_1024/adapter_model.bin +3 -0
  17. step_1032/adapter_config.json +46 -0
  18. step_1032/adapter_model.bin +3 -0
  19. step_104/adapter_config.json +46 -0
  20. step_104/adapter_model.bin +3 -0
  21. step_1040/adapter_config.json +46 -0
  22. step_1040/adapter_model.bin +3 -0
  23. step_1048/adapter_config.json +46 -0
  24. step_1048/adapter_model.bin +3 -0
  25. step_1056/adapter_config.json +46 -0
  26. step_1056/adapter_model.bin +3 -0
  27. step_1064/adapter_config.json +46 -0
  28. step_1064/adapter_model.bin +3 -0
  29. step_1072/adapter_config.json +46 -0
  30. step_1072/adapter_model.bin +3 -0
  31. step_1080/adapter_config.json +46 -0
  32. step_1080/adapter_model.bin +3 -0
  33. step_1088/adapter_config.json +46 -0
  34. step_1088/adapter_model.bin +3 -0
  35. step_1096/adapter_config.json +46 -0
  36. step_1096/adapter_model.bin +3 -0
  37. step_1104/adapter_config.json +46 -0
  38. step_1104/adapter_model.bin +3 -0
  39. step_1112/adapter_config.json +46 -0
  40. step_1112/adapter_model.bin +3 -0
  41. step_112/adapter_config.json +46 -0
  42. step_112/adapter_model.bin +3 -0
  43. step_1120/adapter_config.json +46 -0
  44. step_1120/adapter_model.bin +3 -0
  45. step_1128/adapter_config.json +46 -0
  46. step_1128/adapter_model.bin +3 -0
  47. step_1136/adapter_config.json +46 -0
  48. step_1136/adapter_model.bin +3 -0
  49. step_1144/adapter_config.json +46 -0
  50. step_1144/adapter_model.bin +3 -0
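The commit message above says the folder was pushed with the huggingface_hub client. As a rough sketch of how such an upload is typically performed (the repository id and local path below are placeholders, not values taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()

    # Upload the whole local output directory (configs, LFS-tracked .bin files,
    # per-step checkpoint folders) as a single commit.
    api.upload_folder(
        repo_id="your-org/your-adapter-repo",   # placeholder
        folder_path="outputs/lora-run",         # placeholder
        repo_type="model",
        commit_message="Upload folder using huggingface_hub",
    )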
adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
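The file above is a standard PEFT LoRA adapter config: rank r=128, lora_alpha=256, no dropout, targeting the attention and MLP projection matrices of layers 0 and 1 of the AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq base model. A minimal sketch of how an adapter saved in this layout is usually loaded back onto its base model with the peft library (the adapter path is a placeholder; the repo root, final/, or any step_*/ directory matches the layout):

    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    # Base model name comes from "base_model_name_or_path" in adapter_config.json.
    base = AutoModelForCausalLM.from_pretrained(
        "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq"
    )

    # Attach the LoRA weights from a directory containing adapter_config.json
    # and adapter_model.bin.
    model = PeftModel.from_pretrained(base, "path/to/adapter")  # placeholder path

    # Optionally fold the low-rank update into the base weights for inference.
    model = model.merge_and_unload()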
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e9e0027bbdf7ccaff18bbb895264e1610d96dd30b2b0398f1011146d515d7c7
+ size 18098267
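adapter_model.bin is tracked with Git LFS, so the diff only shows the three-line pointer file (spec version, SHA-256 of the blob, and size in bytes); the roughly 18 MB of adapter weights live in LFS storage. A sketch of fetching the resolved binary rather than the pointer, assuming the huggingface_hub client (the repository id is a placeholder):

    from huggingface_hub import hf_hub_download

    # Downloads and caches the actual weight file that the LFS pointer refers to.
    local_path = hf_hub_download(
        repo_id="your-org/your-adapter-repo",  # placeholder
        filename="adapter_model.bin",
    )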
chat_template.jinja ADDED
@@ -0,0 +1,109 @@
+ {{- bos_token }}
+ {%- if custom_tools is defined %}
+ {%- set tools = custom_tools %}
+ {%- endif %}
+ {%- if not tools_in_user_message is defined %}
+ {%- set tools_in_user_message = true %}
+ {%- endif %}
+ {%- if not date_string is defined %}
+ {%- set date_string = "26 Jul 2024" %}
+ {%- endif %}
+ {%- if not tools is defined %}
+ {%- set tools = none %}
+ {%- endif %}
+
+ {#- This block extracts the system message, so we can slot it into the right place. #}
+ {%- if messages[0]['role'] == 'system' %}
+ {%- set system_message = messages[0]['content']|trim %}
+ {%- set messages = messages[1:] %}
+ {%- else %}
+ {%- set system_message = "" %}
+ {%- endif %}
+
+ {#- System message + builtin tools #}
+ {{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
+ {%- if builtin_tools is defined or tools is not none %}
+ {{- "Environment: ipython\n" }}
+ {%- endif %}
+ {%- if builtin_tools is defined %}
+ {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
+ {%- endif %}
+ {{- "Cutting Knowledge Date: December 2023\n" }}
+ {{- "Today Date: " + date_string + "\n\n" }}
+ {%- if tools is not none and not tools_in_user_message %}
+ {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
+ {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+ {{- "Do not use variables.\n\n" }}
+ {%- for t in tools %}
+ {{- t | tojson(indent=4) }}
+ {{- "\n\n" }}
+ {%- endfor %}
+ {%- endif %}
+ {{- system_message }}
+ {{- "<|eot_id|>" }}
+
+ {#- Custom tools are passed in a user message with some extra guidance #}
+ {%- if tools_in_user_message and not tools is none %}
+ {#- Extract the first user message so we can plug it in here #}
+ {%- if messages | length != 0 %}
+ {%- set first_user_message = messages[0]['content']|trim %}
+ {%- set messages = messages[1:] %}
+ {%- else %}
+ {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
+ {%- endif %}
+ {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
+ {{- "Given the following functions, please respond with a JSON for a function call " }}
+ {{- "with its proper arguments that best answers the given prompt.\n\n" }}
+ {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
+ {{- "Do not use variables.\n\n" }}
+ {%- for t in tools %}
+ {{- t | tojson(indent=4) }}
+ {{- "\n\n" }}
+ {%- endfor %}
+ {{- first_user_message + "<|eot_id|>"}}
+ {%- endif %}
+
+ {%- for message in messages %}
+ {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
+ {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
+ {%- elif 'tool_calls' in message %}
+ {%- if not message.tool_calls|length == 1 %}
+ {{- raise_exception("This model only supports single tool-calls at once!") }}
+ {%- endif %}
+ {%- set tool_call = message.tool_calls[0].function %}
+ {%- if builtin_tools is defined and tool_call.name in builtin_tools %}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+ {{- "<|python_tag|>" + tool_call.name + ".call(" }}
+ {%- for arg_name, arg_val in tool_call.arguments | items %}
+ {{- arg_name + '="' + arg_val + '"' }}
+ {%- if not loop.last %}
+ {{- ", " }}
+ {%- endif %}
+ {%- endfor %}
+ {{- ")" }}
+ {%- else %}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
+ {{- '{"name": "' + tool_call.name + '", ' }}
+ {{- '"parameters": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- "}" }}
+ {%- endif %}
+ {%- if builtin_tools is defined %}
+ {#- This means we're in ipython mode #}
+ {{- "<|eom_id|>" }}
+ {%- else %}
+ {{- "<|eot_id|>" }}
+ {%- endif %}
+ {%- elif message.role == "tool" or message.role == "ipython" %}
+ {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
+ {%- if message.content is mapping or message.content is iterable %}
+ {{- message.content | tojson }}
+ {%- else %}
+ {{- message.content }}
+ {%- endif %}
+ {{- "<|eot_id|>" }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+ {%- endif %}
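The template above is the Llama 3 style chat format: it emits <|start_header_id|>/<|end_header_id|> role headers, a system block with knowledge-cutoff and date lines, optional tool-calling sections, and <|eot_id|>/<|eom_id|> terminators. A short sketch of rendering it through transformers, assuming the tokenizer associated with this repository or its base model picks up chat_template.jinja:

    from transformers import AutoTokenizer

    # Assumption: the tokenizer for this adapter repo or its base model carries the template.
    tok = AutoTokenizer.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq")

    messages = [
        {"role": "system", "content": "Answer the question with yes or no."},
        {"role": "user", "content": "Is water wet?"},
    ]

    prompt = tok.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,  # appends the assistant header from the template
    )
    print(prompt)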
final/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
final/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e9e0027bbdf7ccaff18bbb895264e1610d96dd30b2b0398f1011146d515d7c7
+ size 18098267
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
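special_tokens_map.json maps <s> to BOS, </s> to both EOS and the padding token, and <unk> to the unknown token. Reusing the EOS token as the pad token is the usual workaround when a tokenizer defines no dedicated pad token; a short sketch of the same setting in code, assuming a tokenizer loaded from the base model:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq")

    # Mirror the map above: fall back to the EOS token for padding.
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token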
step_0/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_0/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a255dc47e5ab8a4cd30d6b741c61321b2efc141ea76ffb12f2ccec5f81f548c3
+ size 18098267
step_1000/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee98385d0b4f54ea8f39b45e877e4a686fa1460d7eb7e3eda425b74847d9f7ba
+ size 18098267
step_1008/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1008/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:535a251bcd5bba37fd9130ca63d4a61f5a2d21c36efc0f90938248615aa3b298
+ size 18098267
step_1016/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1016/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6efc008ffc994b56eef2fc999bf17b56244c2a6fad7e62c898207bdf0cf6413a
+ size 18098267
step_1024/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1024/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1c6303f6e823186242760de2ad7cbcc1a5b2b6fa469f6f30e3e0c34f3803e91
+ size 18098267
step_1032/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1032/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bf9fa05f45c5d9408a1d77f4e8035e262a7069c16ddecca54e291067ba4eff5
+ size 18098267
step_104/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_104/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3477931800dd7a8f1b2f3e5c7e5e208de119cfdd4d9fb66401db1e52146fa192
+ size 18098267
step_1040/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1040/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92c339b7a1f2ab41fc42933c3d46843b0c76bc012764ee169e1ca7a7cb02e14b
+ size 18098267
step_1048/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1048/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59031984b5628fb81e8e813889ac6c5b87651715955a3ebb2e585fceeb386f2e
+ size 18098267
step_1056/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1056/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0658e26c45226ddd1c5526d845acb1b7c361d5d049a203a10497741d1860d74b
+ size 18098267
step_1064/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1064/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3224415d0ad8893517476a0ab336c05b3f57985c7bbd3849694a9e945b957e08
+ size 18098267
step_1072/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1072/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56133eecced379b1a043b464d42ddba58d1dbc6071223908620bca71fda48542
+ size 18098267
step_1080/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1080/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:723925478d50014008c6289d566a0e4a8a6ff24a8e2435fb434ed9b4f1b2c2f5
+ size 18098267
step_1088/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1088/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed69d75a216116ae37ff689732a8c63570d34fcfa41763c4335e6ec936d125b9
+ size 18098267
step_1096/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1096/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f249704ad30b478ea164acaa57465322470f5ea9573a1dafd6466857466cdd84
+ size 18098267
step_1104/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1104/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5de74099d4f7d680cfe59086a1477ad47211b6b48e9126013f291cb0d169779b
+ size 18098267
step_1112/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1112/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd5db267dd20fc0da5b1fc20411708b5fc3fcacf94928b581f6ee9692a622212
+ size 18098267
step_112/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_112/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b744a915cbfa6335d12a63fa8feb9846c82f2e093c2cd0da047e1680f31867e6
+ size 18098267
step_1120/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1120/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a5c1c0400c197d5f0be334d183e03d67cc5cde1a66077cdc0f5b0461e52180a
+ size 18098267
step_1128/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1128/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7437cc53e6cf01271bf34b5f93962e263eb96855debb42a69c2f3fd90ea7de0
+ size 18098267
step_1136/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1136/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55a1b92bb1559dd4961bf58aa4353bd72913d0480d081ee7fcbf9a82e379cc68
+ size 18098267
step_1144/adapter_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "AlignmentResearch/Llama-3.3-Tiny-Instruct-boolq",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": false,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 256,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 128,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "model.layers.1.mlp.gate_proj",
+ "model.layers.1.mlp.down_proj",
+ "model.layers.0.self_attn.o_proj",
+ "model.layers.1.self_attn.v_proj",
+ "model.layers.1.self_attn.q_proj",
+ "model.layers.0.mlp.up_proj",
+ "model.layers.0.mlp.gate_proj",
+ "model.layers.0.mlp.down_proj",
+ "model.layers.1.self_attn.o_proj",
+ "model.layers.0.self_attn.v_proj",
+ "model.layers.1.self_attn.k_proj",
+ "model.layers.0.self_attn.k_proj",
+ "model.layers.1.mlp.up_proj",
+ "model.layers.0.self_attn.q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
step_1144/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3086cc8cbf006eb447141df61c5fd2ac9a1b29a2e7fe4d2b14887ab4a7dddb8d
+ size 18098267