auphong2707 committed on
Commit 3b4d74a · verified · 1 Parent(s): 4804193

Add files using upload-large-folder tool

Files changed (30)
  1. qwen25_0.5b_ia3_official_1e-05/adapter_config.json +4 -4
  2. qwen25_0.5b_ia3_official_1e-05/adapter_model.safetensors +1 -1
  3. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/README.md +206 -0
  4. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/adapter_config.json +25 -0
  5. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/adapter_model.safetensors +3 -0
  6. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/optimizer.pt +3 -0
  7. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/rng_state.pth +3 -0
  8. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/scheduler.pt +3 -0
  9. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/trainer_state.json +0 -0
  10. qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/training_args.bin +3 -0
  11. qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/adapter_config.json +4 -4
  12. qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/adapter_model.safetensors +1 -1
  13. qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/optimizer.pt +1 -1
  14. qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/trainer_state.json +0 -0
  15. qwen25_0.5b_ia3_official_1e-05/complete_results.json +0 -0
  16. qwen25_0.5b_lora_official_5e-05/checkpoint-14718/adapter_config.json +4 -4
  17. qwen25_0.5b_lora_official_5e-05/checkpoint-14718/adapter_model.safetensors +1 -1
  18. qwen25_0.5b_lora_official_5e-05/checkpoint-14718/optimizer.pt +1 -1
  19. qwen25_0.5b_lora_official_5e-05/checkpoint-14718/trainer_state.json +0 -0
  20. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/README.md +207 -0
  21. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/adapter_config.json +40 -0
  22. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/adapter_model.safetensors +3 -0
  23. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/optimizer.pt +3 -0
  24. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/rng_state.pth +3 -0
  25. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/trainer_state.json +0 -0
  26. qwen25_0.5b_lora_official_5e-05/checkpoint-29436/training_args.bin +3 -0
  27. qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/adapter_config.json +4 -4
  28. qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/adapter_model.safetensors +1 -1
  29. qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/optimizer.pt +1 -1
  30. qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/trainer_state.json +0 -0
qwen25_0.5b_ia3_official_1e-05/adapter_config.json CHANGED
@@ -14,12 +14,12 @@
  "peft_type": "IA3",
  "revision": null,
  "target_modules": [
- "up_proj",
- "k_proj",
- "v_proj",
  "q_proj",
  "gate_proj",
- "down_proj"
+ "down_proj",
+ "up_proj",
+ "k_proj",
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
qwen25_0.5b_ia3_official_1e-05/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2c03a8f05365a1d181d6d98253816cfe112249f233f1123ccd7aa0aa305cf5e
+ oid sha256:39162dc0161cada05724624c58eb19a2e4fccd0694133ce841e6119f7df75a81
  size 544334280
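The weights themselves are stored through Git LFS, so the diff only touches the pointer file: `oid` is the SHA-256 of the underlying object and `size` is its byte count. As a minimal sketch (assuming the real file has been fetched locally, e.g. with `git lfs pull`; the helper name is illustrative), a downloaded object can be checked against the new pointer like this:

```python
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Hash a downloaded LFS object in chunks and compare it to the pointer's oid/size."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Values taken from the updated pointer in this commit.
print(verify_lfs_object(
    "qwen25_0.5b_ia3_official_1e-05/adapter_model.safetensors",
    expected_oid="39162dc0161cada05724624c58eb19a2e4fccd0694133ce841e6119f7df75a81",
    expected_size=544334280,
))
```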
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-0.5B-Instruct
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.16.0
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "feedforward_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj"
+ ],
+ "inference_mode": true,
+ "init_ia3_weights": true,
+ "modules_to_save": null,
+ "peft_type": "IA3",
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "gate_proj",
+ "down_proj",
+ "up_proj",
+ "k_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
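For reference, this adapter_config.json corresponds to a PEFT `IA3Config` roughly like the sketch below. This is a reconstruction from the JSON above, not the original training script; it assumes a recent `peft` release (the checkpoint README lists PEFT 0.16.0).

```python
from peft import IA3Config, TaskType

# Reconstructed from checkpoint-103026/adapter_config.json (illustrative, not the training code).
ia3_config = IA3Config(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "gate_proj", "down_proj", "up_proj", "k_proj", "v_proj"],
    # (IA)^3 rescales the outputs of these feed-forward modules rather than their inputs.
    feedforward_modules=["gate_proj", "up_proj", "down_proj"],
    init_ia3_weights=True,
)
```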
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39162dc0161cada05724624c58eb19a2e4fccd0694133ce841e6119f7df75a81
+ size 544334280
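A minimal sketch of loading this checkpoint's adapter on top of the base model, assuming `transformers` and `peft` are installed and the repository has been downloaded locally (the local path below is illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# Attach the (IA)^3 adapter saved at this checkpoint (path is illustrative).
model = PeftModel.from_pretrained(base, "qwen25_0.5b_ia3_official_1e-05/checkpoint-103026")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```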
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a2bc318a30530afff315783ea8954729e45a56a8e10f7ec5dfb412c57b5d765
+ size 1620427
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01a5666c8bdad2bd9e0dfe12379fbcd6b491cd265d8f083888a23f9d53351019
+ size 14645
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a04e1e4ac5405fcbe48e27fae82ab2767bad2f8c5aa147969bfb2b2cc03973c
+ size 1465
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen25_0.5b_ia3_official_1e-05/checkpoint-103026/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65de5d851382453c5df3073991ab0e379097f446374b45a39e151203d8b3375d
+ size 5841
qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/adapter_config.json CHANGED
@@ -14,12 +14,12 @@
  "peft_type": "IA3",
  "revision": null,
  "target_modules": [
- "up_proj",
- "k_proj",
- "v_proj",
  "q_proj",
  "gate_proj",
- "down_proj"
+ "down_proj",
+ "up_proj",
+ "k_proj",
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6acdb06748cc62d0f6fb64d6263b0f83f6a456572b47109b7c6e13ce039a3844
+ oid sha256:9780520445264d3ff70e7a69e27380eacde25b3f44c1d0d80dae419d38c26521
  size 544334280
qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c5a146592cd1c7f2c589ad79479421f0036b970cdb3f594155260e736fb3d6e
+ oid sha256:f31a5d22e370da1f3881a8527b5f0290657634f9c67f9c262de3eb05f5ee44e3
  size 1620427
qwen25_0.5b_ia3_official_1e-05/checkpoint-117744/trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
qwen25_0.5b_ia3_official_1e-05/complete_results.json CHANGED
The diff for this file is too large to render. See raw diff
 
qwen25_0.5b_lora_official_5e-05/checkpoint-14718/adapter_config.json CHANGED
@@ -25,12 +25,12 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "down_proj",
- "v_proj",
+ "gate_proj",
  "q_proj",
- "k_proj",
  "up_proj",
- "gate_proj"
+ "v_proj",
+ "k_proj",
+ "down_proj"
  ],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
qwen25_0.5b_lora_official_5e-05/checkpoint-14718/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a590d93194183b7ecc702e7aee6dd5973cc692961742910c4032e256fe99f5ee
+ oid sha256:916ca9a560ead60c3dd2df7ee393de5af345160f06700cfb554aa3318666a7f3
  size 576045776
qwen25_0.5b_lora_official_5e-05/checkpoint-14718/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:284f1a523fa3fc7e6269095638cc652e8c900b503d2518486b8724e0e9362782
+ oid sha256:1b768c5dbeb714773988d4ec49874edb914e029b9c91cfd45d2a68d47ff29521
  size 65122891
qwen25_0.5b_lora_official_5e-05/checkpoint-14718/trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: Qwen/Qwen2.5-0.5B-Instruct
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:Qwen/Qwen2.5-0.5B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.16.0
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/adapter_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "q_proj",
+ "up_proj",
+ "v_proj",
+ "k_proj",
+ "down_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
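For reference, this adapter_config.json maps onto a PEFT `LoraConfig` roughly as sketched below. This is a reconstruction from the JSON above for readability, not the original training script.

```python
from peft import LoraConfig, TaskType

# Reconstructed from checkpoint-29436/adapter_config.json (illustrative, not the training code).
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                  # LoRA rank
    lora_alpha=16,         # effective scaling = lora_alpha / r = 1.0
    lora_dropout=0.05,
    bias="none",
    target_modules=["gate_proj", "q_proj", "up_proj", "v_proj", "k_proj", "down_proj"],
    use_rslora=False,
    use_dora=False,
)
```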
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2811766a06e1b7100757892236fb1054545124bbf2dd8a85aee940cfa89d7cb
+ size 576045776
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ec6b600121fbb30b81410ccd0b14ae7fa6763d7e90c57750a869cfa5638ca78
+ size 65122891
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1009f86977016e2187a99d9b78a55d0007077534db6d15f90497d1af8c6fc005
+ size 14645
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
qwen25_0.5b_lora_official_5e-05/checkpoint-29436/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b59ca6e1a1671e648a905eb83d93ac5794eda339a2f869361ea8c9fcd8f7bdee
+ size 5841
qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/adapter_config.json CHANGED
@@ -25,12 +25,12 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "q_proj",
- "up_proj",
  "down_proj",
- "v_proj",
+ "up_proj",
+ "k_proj",
+ "q_proj",
  "gate_proj",
- "k_proj"
+ "v_proj"
  ],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1475b50ee3b0ff8fecb449a91b79bb7873e5c1a5183a29432132514f0665c5dd
+ oid sha256:016376fc5ac49df4a639959a765f00648ff125876fb1eeca3c5aaef71be8fffe
  size 32478192
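The qlora runs use the same LoRA adapter format; the "qlora" prefix in the directory name suggests the base model was quantized during training. A hedged sketch of re-loading such a checkpoint with a 4-bit quantized base follows (assuming `bitsandbytes` is installed; the quantization settings shown are illustrative, since they are not recorded in adapter_config.json):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Illustrative 4-bit settings; the actual training-time quantization config is not stored in this repo.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B-Instruct",
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "qwen25_0.5b_qlora_official_5e-05/checkpoint-14718")
```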
qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ea70852590bf410e30bcaaa3e11e817e9b21e797996a8ea47b413ad903165804
+ oid sha256:51abeab7240fdbef17e0ac74e6e733c1f010826de2f5f18f6abb00036484f1e9
  size 65122891
qwen25_0.5b_qlora_official_5e-05/checkpoint-14718/trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff