DangMinh21 committed (verified)
Commit d14f48f · 1 Parent(s): 11f5e94

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: a8cheng/SpatialRGPT-VILA1.5-8B
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.9.0
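The "How to Get Started with the Model" section above is still a placeholder; the card only records that this adapter was trained with PEFT 0.9.0 on top of `a8cheng/SpatialRGPT-VILA1.5-8B`. Below is a minimal, hedged sketch of loading the adapter with PEFT: the adapter repository id is a placeholder, and it is an assumption that the VILA-based base model can be instantiated through `transformers` at all rather than through the SpatialRGPT code base's own loaders.

```python
# Hedged sketch, not the authors' documented workflow.
from transformers import AutoModelForCausalLM  # assumption: base model is transformers-loadable
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "a8cheng/SpatialRGPT-VILA1.5-8B",  # base_model from the README front matter
    trust_remote_code=True,            # assumption: custom architecture code is required
)
# "DangMinh21/<this-adapter-repo>" is a placeholder for this repository's Hub id.
model = PeftModel.from_pretrained(base, "DangMinh21/<this-adapter-repo>")
model.eval()
```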
adapter_config.json ADDED
@@ -0,0 +1,252 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "llm.model.layers.2.self_attn.k_proj",
+     "llm.model.layers.22.mlp.up_proj",
+     "llm.model.layers.9.self_attn.v_proj",
+     "llm.model.layers.17.self_attn.o_proj",
+     "llm.model.layers.21.self_attn.q_proj",
+     "llm.model.layers.17.self_attn.k_proj",
+     "llm.model.layers.15.mlp.up_proj",
+     "llm.model.layers.21.self_attn.o_proj",
+     "llm.model.layers.24.self_attn.o_proj",
+     "llm.model.layers.30.mlp.gate_proj",
+     "llm.model.layers.2.self_attn.o_proj",
+     "llm.model.layers.7.self_attn.v_proj",
+     "llm.model.layers.8.self_attn.k_proj",
+     "llm.model.layers.23.self_attn.o_proj",
+     "llm.model.layers.11.self_attn.q_proj",
+     "llm.model.layers.18.mlp.down_proj",
+     "llm.model.layers.1.self_attn.k_proj",
+     "llm.model.layers.26.self_attn.k_proj",
+     "llm.model.layers.19.mlp.up_proj",
+     "llm.model.layers.16.self_attn.v_proj",
+     "llm.model.layers.20.mlp.gate_proj",
+     "llm.model.layers.22.self_attn.o_proj",
+     "llm.model.layers.14.mlp.down_proj",
+     "llm.model.layers.0.self_attn.v_proj",
+     "llm.model.layers.18.self_attn.k_proj",
+     "llm.model.layers.6.self_attn.v_proj",
+     "llm.model.layers.12.mlp.gate_proj",
+     "llm.model.layers.5.self_attn.o_proj",
+     "llm.model.layers.0.mlp.down_proj",
+     "llm.model.layers.20.mlp.up_proj",
+     "llm.model.layers.31.mlp.up_proj",
+     "llm.model.layers.6.self_attn.k_proj",
+     "llm.model.layers.11.self_attn.k_proj",
+     "llm.model.layers.26.mlp.up_proj",
+     "llm.model.layers.0.self_attn.q_proj",
+     "llm.model.layers.9.self_attn.o_proj",
+     "llm.model.layers.3.mlp.up_proj",
+     "llm.model.layers.27.mlp.up_proj",
+     "llm.model.layers.18.self_attn.v_proj",
+     "llm.model.layers.28.self_attn.q_proj",
+     "llm.model.layers.4.mlp.gate_proj",
+     "llm.model.layers.1.self_attn.q_proj",
+     "llm.model.layers.17.mlp.gate_proj",
+     "llm.model.layers.31.self_attn.v_proj",
+     "llm.model.layers.18.self_attn.o_proj",
+     "llm.model.layers.20.self_attn.k_proj",
+     "llm.model.layers.10.mlp.down_proj",
+     "llm.model.layers.23.mlp.gate_proj",
+     "llm.model.layers.4.self_attn.o_proj",
+     "llm.model.layers.9.mlp.up_proj",
+     "llm.model.layers.17.mlp.up_proj",
+     "llm.model.layers.22.self_attn.v_proj",
+     "llm.model.layers.13.mlp.down_proj",
+     "llm.model.layers.31.mlp.gate_proj",
+     "llm.model.layers.21.self_attn.v_proj",
+     "llm.model.layers.12.self_attn.o_proj",
+     "llm.model.layers.9.self_attn.q_proj",
+     "llm.model.layers.15.self_attn.o_proj",
+     "llm.model.layers.25.mlp.up_proj",
+     "llm.model.layers.2.mlp.gate_proj",
+     "llm.model.layers.13.self_attn.o_proj",
+     "llm.model.layers.25.mlp.gate_proj",
+     "llm.model.layers.19.mlp.down_proj",
+     "llm.model.layers.20.self_attn.v_proj",
+     "llm.model.layers.31.self_attn.q_proj",
+     "llm.model.layers.19.self_attn.o_proj",
+     "llm.model.layers.10.self_attn.k_proj",
+     "llm.model.layers.23.self_attn.v_proj",
+     "llm.model.layers.6.self_attn.o_proj",
+     "llm.model.layers.27.mlp.down_proj",
+     "llm.model.layers.26.mlp.down_proj",
+     "llm.model.layers.4.self_attn.q_proj",
+     "llm.model.layers.5.self_attn.q_proj",
+     "llm.model.layers.10.mlp.up_proj",
+     "llm.model.layers.22.self_attn.k_proj",
+     "llm.model.layers.13.mlp.gate_proj",
+     "llm.model.layers.28.mlp.down_proj",
+     "llm.model.layers.29.mlp.down_proj",
+     "llm.model.layers.29.self_attn.o_proj",
+     "llm.model.layers.8.self_attn.o_proj",
+     "llm.model.layers.7.self_attn.q_proj",
+     "llm.model.layers.19.self_attn.q_proj",
+     "llm.model.layers.12.self_attn.k_proj",
+     "llm.model.layers.3.mlp.gate_proj",
+     "llm.model.layers.16.self_attn.k_proj",
+     "llm.model.layers.13.self_attn.v_proj",
+     "llm.model.layers.1.mlp.up_proj",
+     "llm.model.layers.27.self_attn.o_proj",
+     "llm.model.layers.4.mlp.down_proj",
+     "llm.model.layers.25.mlp.down_proj",
+     "llm.model.layers.5.mlp.down_proj",
+     "llm.model.layers.3.self_attn.k_proj",
+     "llm.model.layers.4.self_attn.k_proj",
+     "llm.model.layers.14.self_attn.o_proj",
+     "llm.model.layers.23.self_attn.k_proj",
+     "llm.model.layers.7.self_attn.k_proj",
+     "llm.model.layers.6.mlp.gate_proj",
+     "llm.model.layers.19.self_attn.k_proj",
+     "llm.model.layers.12.self_attn.q_proj",
+     "llm.model.layers.21.mlp.down_proj",
+     "llm.model.layers.28.self_attn.v_proj",
+     "llm.model.layers.27.self_attn.v_proj",
+     "llm.model.layers.11.mlp.down_proj",
+     "region_extractor.rgb_projector",
+     "llm.model.layers.30.self_attn.q_proj",
+     "llm.model.layers.21.mlp.up_proj",
+     "llm.model.layers.7.self_attn.o_proj",
+     "llm.model.layers.29.self_attn.k_proj",
+     "llm.model.layers.8.self_attn.q_proj",
+     "llm.model.layers.28.self_attn.o_proj",
+     "llm.model.layers.27.self_attn.q_proj",
+     "llm.model.layers.22.mlp.gate_proj",
+     "llm.model.layers.22.self_attn.q_proj",
+     "llm.model.layers.14.mlp.gate_proj",
+     "llm.model.layers.25.self_attn.k_proj",
+     "llm.model.layers.28.mlp.gate_proj",
+     "llm.model.layers.15.self_attn.k_proj",
+     "llm.model.layers.2.mlp.down_proj",
+     "llm.model.layers.28.mlp.up_proj",
+     "llm.model.layers.8.mlp.gate_proj",
+     "llm.model.layers.7.mlp.down_proj",
+     "llm.model.layers.1.mlp.gate_proj",
+     "llm.model.layers.0.self_attn.o_proj",
+     "region_extractor.depth_projector",
+     "llm.model.layers.6.mlp.up_proj",
+     "llm.model.layers.5.self_attn.k_proj",
+     "llm.model.layers.17.self_attn.q_proj",
+     "llm.model.layers.27.self_attn.k_proj",
+     "llm.model.layers.31.mlp.down_proj",
+     "llm.model.layers.1.mlp.down_proj",
+     "llm.model.layers.4.mlp.up_proj",
+     "llm.model.layers.26.self_attn.q_proj",
+     "llm.model.layers.0.mlp.gate_proj",
+     "llm.model.layers.30.self_attn.v_proj",
+     "llm.model.layers.15.self_attn.q_proj",
+     "llm.model.layers.16.mlp.up_proj",
+     "llm.model.layers.14.self_attn.k_proj",
+     "llm.model.layers.16.self_attn.o_proj",
+     "llm.model.layers.0.mlp.up_proj",
+     "llm.model.layers.24.mlp.gate_proj",
+     "llm.model.layers.29.self_attn.q_proj",
+     "llm.model.layers.2.mlp.up_proj",
+     "llm.model.layers.21.self_attn.k_proj",
+     "llm.model.layers.4.self_attn.v_proj",
+     "llm.model.layers.28.self_attn.k_proj",
+     "llm.model.layers.18.mlp.up_proj",
+     "llm.model.layers.9.mlp.down_proj",
+     "llm.model.layers.16.mlp.gate_proj",
+     "llm.model.layers.23.mlp.up_proj",
+     "llm.model.layers.25.self_attn.q_proj",
+     "llm.model.layers.30.self_attn.k_proj",
+     "llm.model.layers.26.mlp.gate_proj",
+     "llm.model.layers.19.self_attn.v_proj",
+     "llm.model.layers.21.mlp.gate_proj",
+     "llm.model.layers.3.self_attn.o_proj",
+     "llm.model.layers.3.self_attn.v_proj",
+     "llm.model.layers.3.self_attn.q_proj",
+     "llm.model.layers.12.self_attn.v_proj",
+     "llm.model.layers.24.mlp.down_proj",
+     "llm.model.layers.1.self_attn.v_proj",
+     "llm.model.layers.9.mlp.gate_proj",
+     "llm.model.layers.17.self_attn.v_proj",
+     "llm.model.layers.14.mlp.up_proj",
+     "llm.model.layers.15.self_attn.v_proj",
+     "llm.model.layers.24.self_attn.q_proj",
+     "llm.model.layers.3.mlp.down_proj",
+     "llm.model.layers.2.self_attn.v_proj",
+     "llm.model.layers.16.mlp.down_proj",
+     "llm.model.layers.8.mlp.down_proj",
+     "llm.model.layers.5.self_attn.v_proj",
+     "llm.model.layers.10.self_attn.o_proj",
+     "llm.model.layers.29.mlp.gate_proj",
+     "llm.model.layers.24.mlp.up_proj",
+     "llm.model.layers.6.mlp.down_proj",
+     "llm.model.layers.9.self_attn.k_proj",
+     "llm.model.layers.0.self_attn.k_proj",
+     "llm.model.layers.13.self_attn.q_proj",
+     "llm.model.layers.25.self_attn.v_proj",
+     "llm.model.layers.20.mlp.down_proj",
+     "llm.model.layers.2.self_attn.q_proj",
+     "llm.model.layers.29.mlp.up_proj",
+     "llm.model.layers.16.self_attn.q_proj",
+     "llm.model.layers.10.self_attn.q_proj",
+     "llm.model.layers.1.self_attn.o_proj",
+     "llm.model.layers.8.self_attn.v_proj",
+     "llm.model.layers.20.self_attn.o_proj",
+     "llm.model.layers.11.self_attn.v_proj",
+     "llm.model.layers.7.mlp.gate_proj",
+     "llm.model.layers.15.mlp.down_proj",
+     "llm.model.layers.30.mlp.up_proj",
+     "llm.model.layers.24.self_attn.v_proj",
+     "llm.model.layers.26.self_attn.o_proj",
+     "llm.model.layers.14.self_attn.q_proj",
+     "llm.model.layers.11.self_attn.o_proj",
+     "llm.model.layers.29.self_attn.v_proj",
+     "llm.model.layers.10.self_attn.v_proj",
+     "llm.model.layers.18.mlp.gate_proj",
+     "llm.model.layers.8.mlp.up_proj",
+     "llm.model.layers.23.self_attn.q_proj",
+     "llm.model.layers.23.mlp.down_proj",
+     "llm.model.layers.31.self_attn.o_proj",
+     "llm.model.layers.13.mlp.up_proj",
+     "llm.model.layers.11.mlp.up_proj",
+     "llm.model.layers.7.mlp.up_proj",
+     "llm.model.layers.31.self_attn.k_proj",
+     "llm.model.layers.15.mlp.gate_proj",
+     "llm.model.layers.27.mlp.gate_proj",
+     "llm.model.layers.12.mlp.down_proj",
+     "llm.model.layers.6.self_attn.q_proj",
+     "llm.model.layers.25.self_attn.o_proj",
+     "llm.model.layers.11.mlp.gate_proj",
+     "llm.model.layers.24.self_attn.k_proj",
+     "llm.model.layers.19.mlp.gate_proj",
+     "llm.model.layers.5.mlp.gate_proj",
+     "llm.model.layers.22.mlp.down_proj",
+     "llm.model.layers.12.mlp.up_proj",
+     "llm.model.layers.18.self_attn.q_proj",
+     "llm.model.layers.5.mlp.up_proj",
+     "llm.model.layers.14.self_attn.v_proj",
+     "llm.model.layers.20.self_attn.q_proj",
+     "llm.model.layers.30.self_attn.o_proj",
+     "llm.model.layers.17.mlp.down_proj",
+     "llm.model.layers.10.mlp.gate_proj",
+     "llm.model.layers.30.mlp.down_proj",
+     "llm.model.layers.26.self_attn.v_proj",
+     "llm.model.layers.13.self_attn.k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
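Listed one module per line, the `target_modules` array is hard to read at a glance: it appears to cover the q/k/v/o attention projections and the gate/up/down MLP projections of LLM layers 0-31, plus the region extractor's `rgb_projector` and `depth_projector`, with LoRA rank `r=32`, `lora_alpha=32` (so the usual LoRA scaling `lora_alpha / r` is 1.0) and dropout 0.05. Below is a small stdlib-only sketch that tallies the list to confirm that reading; the local path to the config file is an assumption.

```python
# Hedged sketch: summarise target_modules from the adapter_config.json shown above.
import json
from collections import Counter

with open("adapter_config.json") as f:  # assumed local path to this file
    cfg = json.load(f)

modules = cfg["target_modules"]
print(len(modules), "target modules")
# Tally by the final name component, e.g. q_proj, up_proj, rgb_projector, ...
print(Counter(name.rsplit(".", 1)[-1] for name in modules))
print("LoRA scaling =", cfg["lora_alpha"] / cfg["r"])  # 32 / 32 = 1.0
```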
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:881fd87ea2efbc494730285fd9f041504f1e768472a75afd4cbc1835a4d52f7c
+ size 168506768
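The entry above is a Git LFS pointer rather than the weights themselves; the `oid` and `size` describe the real `adapter_model.safetensors` (about 168 MB). A short sketch, assuming the file has already been downloaded locally, that checks the download against the recorded SHA-256 and byte size:

```python
# Hedged sketch: verify a local adapter_model.safetensors against the LFS pointer above.
import hashlib
import os

path = "adapter_model.safetensors"  # assumed local download location
expected_oid = "881fd87ea2efbc494730285fd9f041504f1e768472a75afd4cbc1835a4d52f7c"
expected_size = 168506768

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.safetensors matches the LFS pointer")
```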
checkpoint-14985/README.md ADDED
@@ -0,0 +1,202 @@
checkpoint-14985/adapter_config.json ADDED
@@ -0,0 +1,252 @@
checkpoint-14985/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf1aef50c2ad42c863339a84e0298b5563b0204308a01209cdf2feb5fc4c840a
+ size 168506768
checkpoint-14985/global_step14985/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f058a9ba6ed7c3bc0fbebbc9e0921332ef62d8861cea8ed8cdee9b39591aa8c6
+ size 1438748336
checkpoint-14985/global_step14985/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:687127868baffafeb2e6f11b2eac86bd9686801238c6a4fb1b31db40632c5711
+ size 13217124518
checkpoint-14985/latest ADDED
@@ -0,0 +1 @@
+ global_step14985
checkpoint-14985/non_lora_trainables.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a1cd99dd954a185936134534dbed8b9d6c1897e80ffd62097ba78bfda2aadc0
+ size 71340800
checkpoint-14985/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:243afa74317471681b96048372def099149905a5ee634b86dca7aa9c9e78becb
+ size 14244
checkpoint-14985/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc0b6bc2b0a2d06697575b3dc0b1fc981eac40da760fc92c1ac8632729acfbfa
+ size 1064
checkpoint-14985/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-14985/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
+ # DeepSpeed data structures it has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
59
+
60
+
61
+ def get_model_state_file(checkpoint_dir, zero_stage):
62
+ if not os.path.isdir(checkpoint_dir):
63
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
+
65
+ # there should be only one file
66
+ if zero_stage == 2:
67
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
+ elif zero_stage == 3:
69
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
+
71
+ if not os.path.exists(file):
72
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
73
+
74
+ return file
75
+
76
+
77
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
+ # XXX: need to test that this simple glob rule works for multi-node setup too
79
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
+
81
+ if len(ckpt_files) == 0:
82
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
+
84
+ return ckpt_files
85
+
86
+
87
+ def get_optim_files(checkpoint_dir):
88
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
+
90
+
91
+ def get_model_state_files(checkpoint_dir):
92
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
+
94
+
95
+ def parse_model_states(files):
96
+ zero_model_states = []
97
+ for file in files:
98
+ state_dict = torch.load(file, map_location=device)
99
+
100
+ if BUFFER_NAMES not in state_dict:
101
+ raise ValueError(f"{file} is not a model state checkpoint")
102
+ buffer_names = state_dict[BUFFER_NAMES]
103
+ if debug:
104
+ print("Found buffers:", buffer_names)
105
+
106
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
+ param_shapes = state_dict[PARAM_SHAPES]
109
+
110
+ # collect parameters that are included in param_shapes
111
+ param_names = []
112
+ for s in param_shapes:
113
+ for name in s.keys():
114
+ param_names.append(name)
115
+
116
+ # update with frozen parameters
117
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
+ if frozen_param_shapes is not None:
119
+ if debug:
120
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
+ param_names += list(frozen_param_shapes.keys())
122
+
123
+ # handle shared params
124
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
+
126
+ ds_version = state_dict.get(DS_VERSION, None)
127
+
128
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
+
130
+ z_model_state = zero_model_state(buffers=buffers,
131
+ param_shapes=param_shapes,
132
+ shared_params=shared_params,
133
+ ds_version=ds_version,
134
+ frozen_param_shapes=frozen_param_shapes,
135
+ frozen_param_fragments=frozen_param_fragments)
136
+ zero_model_states.append(z_model_state)
137
+
138
+ return zero_model_states
139
+
140
+
141
+ def parse_optim_states(files, ds_checkpoint_dir):
142
+
143
+ total_files = len(files)
144
+ state_dicts = []
145
+ for f in files:
146
+ state_dicts.append(torch.load(f, map_location=device))
147
+
148
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
149
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
150
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
151
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
152
+
153
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
154
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
155
+ # use the max of the partition_count to get the dp world_size.
156
+
157
+ if type(world_size) is list:
158
+ world_size = max(world_size)
159
+
160
+ if world_size != total_files:
161
+ raise ValueError(
162
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
163
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
164
+ )
165
+
166
+ # the groups are named differently in each stage
167
+ if zero_stage == 2:
168
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
169
+ elif zero_stage == 3:
170
+ fp32_groups_key = FP32_FLAT_GROUPS
171
+ else:
172
+ raise ValueError(f"unknown zero stage {zero_stage}")
173
+
174
+ if zero_stage == 2:
175
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
176
+ elif zero_stage == 3:
177
+ # if there is more than one param group, there will be multiple flattened tensors - one
178
+ # flattened tensor per group - for simplicity merge them into a single tensor
179
+ #
180
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
181
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
182
+
183
+ fp32_flat_groups = [
184
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
185
+ ]
186
+
187
+ return zero_stage, world_size, fp32_flat_groups
188
+
189
+
190
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
191
+ """
192
+ Returns fp32 state_dict reconstructed from ds checkpoint
193
+
194
+ Args:
195
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
196
+
197
+ """
198
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
199
+
200
+ optim_files = get_optim_files(ds_checkpoint_dir)
201
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
202
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
203
+
204
+ model_files = get_model_state_files(ds_checkpoint_dir)
205
+
206
+ zero_model_states = parse_model_states(model_files)
207
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
208
+
209
+ if zero_stage == 2:
210
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
211
+ elif zero_stage == 3:
212
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
248
+ param_shapes = zero_model_states[0].param_shapes
249
+
250
+ # Reconstruction protocol:
251
+ #
252
+ # XXX: document this
253
+
254
+ if debug:
255
+ for i in range(world_size):
256
+ for j in range(len(fp32_flat_groups[0])):
257
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
258
+
259
+ # XXX: memory usage doubles here (zero2)
260
+ num_param_groups = len(fp32_flat_groups[0])
261
+ merged_single_partition_of_fp32_groups = []
262
+ for i in range(num_param_groups):
263
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
264
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
265
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
266
+ avail_numel = sum(
267
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
268
+
269
+ if debug:
270
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
271
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
272
+ # not asserting if there is a mismatch due to possible padding
273
+ print(f"Have {avail_numel} numels to process.")
274
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
275
+
276
+ # params
277
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
278
+ # out-of-core computing solution
279
+ total_numel = 0
280
+ total_params = 0
281
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
282
+ offset = 0
283
+ avail_numel = full_single_fp32_vector.numel()
284
+ for name, shape in shapes.items():
285
+
286
+ unpartitioned_numel = shape.numel()
287
+ total_numel += unpartitioned_numel
288
+ total_params += 1
289
+
290
+ if debug:
291
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
292
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
293
+ offset += unpartitioned_numel
294
+
295
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
296
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
297
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
298
+ # live optimizer object, so we are checking that the numbers are within the right range
299
+ align_to = 2 * world_size
300
+
301
+ def zero2_align(x):
302
+ return align_to * math.ceil(x / align_to)
303
+
304
+ if debug:
305
+ print(f"original offset={offset}, avail_numel={avail_numel}")
306
+
307
+ offset = zero2_align(offset)
308
+ avail_numel = zero2_align(avail_numel)
309
+
310
+ if debug:
311
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
312
+
313
+ # Sanity check
314
+ if offset != avail_numel:
315
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
316
+
317
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
318
+
319
+
320
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
321
+ state_dict = OrderedDict()
322
+
323
+ # buffers
324
+ buffers = zero_model_states[0].buffers
325
+ state_dict.update(buffers)
326
+ if debug:
327
+ print(f"added {len(buffers)} buffers")
328
+
329
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
330
+
331
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
332
+
333
+ # recover shared parameters
334
+ for pair in zero_model_states[0].shared_params:
335
+ if pair[1] in state_dict:
336
+ state_dict[pair[0]] = state_dict[pair[1]]
337
+
338
+ return state_dict
339
+
340
+
341
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
342
+ remainder = unpartitioned_numel % world_size
343
+ padding_numel = (world_size - remainder) if remainder else 0
344
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
345
+ return partitioned_numel, padding_numel
346
+
347
+
348
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
349
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
350
+ return
351
+
352
+ if debug:
353
+ for i in range(world_size):
354
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
355
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
356
+
357
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
358
+ wanted_params = len(frozen_param_shapes)
359
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
360
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
361
+ print(f'Frozen params: Have {avail_numel} numels to process.')
362
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
363
+
364
+ total_params = 0
365
+ total_numel = 0
366
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
367
+ total_params += 1
368
+ unpartitioned_numel = shape.numel()
369
+ total_numel += unpartitioned_numel
370
+
371
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
372
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
373
+
374
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
375
+
376
+ if debug:
377
+ print(
378
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
379
+ )
380
+
381
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
382
+
383
+
384
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
385
+ param_shapes = zero_model_states[0].param_shapes
386
+ avail_numel = fp32_flat_groups[0].numel() * world_size
387
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
388
+ # param, re-consolidating each param, while dealing with padding if any
389
+
390
+ # merge list of dicts, preserving order
391
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
392
+
393
+ if debug:
394
+ for i in range(world_size):
395
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
396
+
397
+ wanted_params = len(param_shapes)
398
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
399
+ # not asserting if there is a mismatch due to possible padding
400
+ avail_numel = fp32_flat_groups[0].numel() * world_size
401
+ print(f"Trainable params: Have {avail_numel} numels to process.")
402
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
403
+
404
+ # params
405
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
406
+ # out-of-core computing solution
407
+ offset = 0
408
+ total_numel = 0
409
+ total_params = 0
410
+ for name, shape in param_shapes.items():
411
+
412
+ unpartitioned_numel = shape.numel()
413
+ total_numel += unpartitioned_numel
414
+ total_params += 1
415
+
416
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
417
+
418
+ if debug:
419
+ print(
420
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
421
+ )
422
+
423
+ # XXX: memory usage doubles here
424
+ state_dict[name] = torch.cat(
425
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
426
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
427
+ offset += partitioned_numel
428
+
429
+ offset *= world_size
430
+
431
+ # Sanity check
432
+ if offset != avail_numel:
433
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
434
+
435
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
436
+
437
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
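That is the end of the bundled `zero_to_fp32.py`. As a hedged usage sketch (the checkpoint folder name below is hypothetical; point it at whichever directory actually contains the `latest` file and the `global_step*` tag folder), the ZeRO shards can be consolidated either from Python or from the command line:

```python
# Sketch only: "checkpoint-1000" is a placeholder, not a folder guaranteed to exist in this repo.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

checkpoint_dir = "./checkpoint-1000"                      # folder containing `latest` and global_step*/
output_file = "./checkpoint-1000/pytorch_model_fp32.bin"  # consolidated fp32 weights
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file)

# Equivalent CLI invocation of the script shipped in the checkpoint folder:
#   python zero_to_fp32.py ./checkpoint-1000 ./checkpoint-1000/pytorch_model_fp32.bin
```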
config.json ADDED
@@ -0,0 +1,325 @@
+ {
+   "_name_or_path": "./checkpoints/SpatialRGPT-VILA1.5-8B-SFT-SpatialWarehouse",
+   "architectures": [
+     "LlavaLlamaModel"
+   ],
+   "drop_path_rate": 0.0,
+   "enable_depth": true,
+   "enable_region": true,
+   "fps": 0.0,
+   "hidden_size": 4096,
+   "image_aspect_ratio": "resize",
+   "interpolate_mode": "linear",
+   "llm_cfg": {
+     "_name_or_path": "./checkpoints/SpatialRGPT-VILA1.5-8B/llm",
+     "add_cross_attention": false,
+     "architectures": [
+       "LlamaForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 128000,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 128009,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 14336,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 8192,
+     "min_length": 0,
+     "model_max_length": 4096,
+     "model_type": "llama",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 32,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 32,
+     "num_key_value_heads": 8,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "pretraining_tp": 1,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 500000.0,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "tokenizer_model_max_length": 4096,
+     "tokenizer_padding_side": "right",
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 128259
+   },
+   "mm_hidden_size": 1152,
+   "mm_projector_cfg": {
+     "_name_or_path": "./checkpoints/SpatialRGPT-VILA1.5-8B/mm_projector",
+     "add_cross_attention": false,
+     "architectures": [
+       "MultimodalProjector"
+     ],
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "mm_projector_type": "mlp_downsample",
+     "model_type": "v2l_projector",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "mm_projector_lr": null,
+   "mm_use_im_patch_token": false,
+   "mm_use_im_start_end": false,
+   "mm_vision_select_feature": "cls_patch",
+   "mm_vision_select_layer": -2,
+   "model_dtype": "torch.bfloat16",
+   "model_type": "llava_llama",
+   "num_video_frames": 8,
+   "region_extractor_cfg": {
+     "_name_or_path": "./checkpoints/SpatialRGPT-VILA1.5-8B/region_extractor",
+     "add_cross_attention": false,
+     "architectures": [
+       "RegionExtractor"
+     ],
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "region_extractor",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "region_extractor_type": "regiongpt",
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "resume_path": "./checkpoints/SpatialRGPT-VILA1.5-8B-SFT-SpatialWarehouse",
+   "s2": false,
+   "s2_max_split_size": 336,
+   "s2_scales": "336,672,1008",
+   "transformers_version": "4.37.2",
+   "tune_language_model": false,
+   "tune_mm_projector": true,
+   "tune_region_extractor": true,
+   "tune_vision_tower": false,
+   "vision_resolution": -1,
+   "vision_tower_cfg": {
+     "_name_or_path": "./checkpoints/SpatialRGPT-VILA1.5-8B/vision_tower",
+     "add_cross_attention": false,
+     "architectures": [
+       "SiglipVisionModel"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "intermediate_size": 4304,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "llm_depth_token_id": 128258,
+     "llm_mask_token_id": 128257,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "siglip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   }
+ }
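The nested config above is long, so here is a short, hedged sketch of reading the handful of fields that matter most when loading this model; it assumes only the standard library and that `config.json` has been downloaded locally:

```python
import json

# Hypothetical local path to this repository's config.json
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["model_type"])                             # llava_llama
print(cfg["enable_depth"], cfg["enable_region"])     # True True -> depth and region prompts enabled
print(cfg["llm_cfg"]["tokenizer_model_max_length"])  # 4096-token context for the 8B LLM backbone
print(cfg["vision_tower_cfg"]["image_size"])         # 384px SigLIP vision tower
print(cfg["mm_projector_cfg"]["mm_projector_type"])  # mlp_downsample projector
```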
non_lora_trainables.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:135dd02104f0989ebcea265bef6786dd5c6f441f0d153a7686adee132ec2f69e
+ size 71340800
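This file is stored as a Git LFS pointer, so the ~71 MB payload has to be fetched before use. In LLaVA/VILA-style LoRA training, `non_lora_trainables.bin` conventionally holds the full-rank weights trained alongside the adapter (here presumably the mm_projector and region extractor, given the `tune_*` flags in config.json); that layout is an assumption, not something stated in this repo. A hedged loading sketch, with a placeholder repo id:

```python
import torch
from huggingface_hub import hf_hub_download

# Fetch the real payload behind the LFS pointer; "<user>/<this-repo>" is a placeholder.
path = hf_hub_download(repo_id="<user>/<this-repo>", filename="non_lora_trainables.bin")

# Typically a plain torch state dict; inspect which submodules it carries.
weights = torch.load(path, map_location="cpu")
print(len(weights), "tensors")
print(sorted({k.split(".")[0] for k in weights}))
```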
trainer_state.json ADDED
The diff for this file is too large to render.