DavidNguyen committed
Commit 1aa2000 (verified) · 1 Parent(s): 2a1eb16

caa1ef0fada7a0f4ac4c3841ef9e84332d3a30f97434d6a475f6618b724f7d5c

Files changed (20)
  1. .gitattributes +3 -0
  2. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmerealworld_lite.json +3 -0
  3. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmmu_val.json +0 -0
  4. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmstar.json +0 -0
  5. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank0_metric_eval_done.txt +1 -0
  6. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank1_metric_eval_done.txt +1 -0
  7. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank2_metric_eval_done.txt +1 -0
  8. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/results.json +683 -0
  9. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/scienceqa_img.json +0 -0
  10. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/seedbench_2_plus.json +0 -0
  11. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/mmbench_en_dev_results.json +1 -0
  12. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/mmbench_en_dev_results.xlsx +3 -0
  13. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/textvqa_submission_2025-06-28-13-59-24.json +0 -0
  14. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/textvqa_val.json +3 -0
  15. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/ocrbench.json +0 -0
  16. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank0_metric_eval_done.txt +1 -0
  17. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank1_metric_eval_done.txt +1 -0
  18. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank2_metric_eval_done.txt +1 -0
  19. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/results.json +67 -0
  20. sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/results/ocrbench_results.txt +18 -0
.gitattributes CHANGED
@@ -229,3 +229,6 @@ sft/1M3/Full_smoe/checkpoint-20679/logs/0627_1245_llava...l_mme_llava_model_args
 sft/1M3/Full_smoe/checkpoint-20679/logs/0627_1245_llava...l_mme_llava_model_args_18fef8/textvqa_val.json filter=lfs diff=lfs merge=lfs -text
 sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/infovqa_val.json filter=lfs diff=lfs merge=lfs -text
 sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmbench_en_dev.json filter=lfs diff=lfs merge=lfs -text
+sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmerealworld_lite.json filter=lfs diff=lfs merge=lfs -text
+sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/mmbench_en_dev_results.xlsx filter=lfs diff=lfs merge=lfs -text
+sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/textvqa_val.json filter=lfs diff=lfs merge=lfs -text
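Each attribute line above routes the matched path through the Git LFS filter, so the repository stores only a small pointer file (shown under the next heading) instead of the multi-gigabyte eval logs. As a minimal illustrative Python sketch, assuming the standard "<pattern> filter=lfs diff=lfs merge=lfs -text" form used throughout this file, the LFS-tracked patterns in a checkout can be listed like so:

from pathlib import Path

def lfs_tracked_patterns(gitattributes_path: str = ".gitattributes") -> list[str]:
    # Keep the first field (the path pattern) of every line that carries
    # the filter=lfs attribute; other attribute lines are skipped.
    patterns = []
    for line in Path(gitattributes_path).read_text().splitlines():
        parts = line.split()
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

for pattern in lfs_tracked_patterns():
    print(pattern)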
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmerealworld_lite.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d1954b2f8f69527a69c05206e95c9cd8ff1c5134f091bb69ebdbff0b082dcc4
+size 1994104291
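What got committed here is only this Git LFS pointer; the real mmerealworld_lite.json blob (1994104291 bytes, roughly 2 GB) lives in LFS storage under the sha256 oid above. A minimal parsing sketch, assuming only the three-field "key value" format shown (the file path in the comment is illustrative):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Pointer files are "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line)
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

# parse_lfs_pointer("mmerealworld_lite.json")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "sha256": "3d1954b2f8f6...", "size_bytes": 1994104291}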
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmmu_val.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/mmstar.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 2 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/results.json ADDED
@@ -0,0 +1,683 @@
+{
+    "results": {
+        "ai2d": {
+            "exact_match,flexible-extract": 0.677461139896373,
+            "exact_match_stderr,flexible-extract": 0.008413271478449879,
+            "alias": "ai2d"
+        },
+        "infovqa_val": {
+            "anls,none": 0.27990003570153515,
+            "anls_stderr,none": 0.008484355470844385,
+            "alias": "infovqa_val"
+        },
+        "mmbench_en_dev": {
+            "gpt_eval_score,none": 68.29896907216495,
+            "gpt_eval_score_stderr,none": "N/A",
+            "submission,none": null,
+            "submission_stderr,none": "N/A",
+            "alias": "mmbench_en_dev"
+        },
+        "mme": {
+            "mme_cognition_score,none": 315.0,
+            "mme_cognition_score_stderr,none": "N/A",
+            "mme_percetion_score,none": 1191.5826330532213,
+            "mme_percetion_score_stderr,none": "N/A",
+            "alias": "mme"
+        },
+        "mmerealworld_lite": {
+            "mme_realworld_score,none": 0.30015633142261594,
+            "mme_realworld_score_stderr,none": "N/A",
+            "alias": "mmerealworld_lite"
+        },
+        "mmmu_val": {
+            "mmmu_acc,none": 0.41556,
+            "mmmu_acc_stderr,none": "N/A",
+            "alias": "mmmu_val"
+        },
+        "mmstar": {
+            "coarse perception,none": 0.6289471986108911,
+            "coarse perception_stderr,none": "N/A",
+            "fine-grained perception,none": 0.31227585359862436,
+            "fine-grained perception_stderr,none": "N/A",
+            "instance reasoning,none": 0.5168685770353041,
+            "instance reasoning_stderr,none": "N/A",
+            "logical reasoning,none": 0.3484233038688484,
+            "logical reasoning_stderr,none": "N/A",
+            "math,none": 0.390119620422347,
+            "math_stderr,none": "N/A",
+            "science & technology,none": 0.331204260883257,
+            "science & technology_stderr,none": "N/A",
+            "alias": "mmstar"
+        },
+        "scienceqa_img": {
+            "exact_match,none": 0.8041646008924145,
+            "exact_match_stderr,none": 0.008838382866139713,
+            "alias": "scienceqa_img"
+        },
+        "seedbench_2_plus": {
+            "seedbench_2_plus_Chart,none": 0.48641975308641977,
+            "seedbench_2_plus_Chart_stderr,none": "N/A",
+            "seedbench_2_plus_all,none": 0.4857268335529205,
+            "seedbench_2_plus_all_stderr,none": "N/A",
+            "seedbench_2_plus_Web,none": 0.5212121212121212,
+            "seedbench_2_plus_Web_stderr,none": "N/A",
+            "seedbench_2_plus_Map,none": 0.4560099132589839,
+            "seedbench_2_plus_Map_stderr,none": "N/A",
+            "alias": "seedbench_2_plus"
+        },
+        "textvqa_val": {
+            "exact_match,none": 0.38544000025987624,
+            "exact_match_stderr,none": 0.0066797698348715715,
+            "submission,none": null,
+            "submission_stderr,none": "N/A",
+            "alias": "textvqa_val"
+        }
+    },
+    "configs": {
+        "ai2d": {
+            "task": "ai2d",
+            "dataset_path": "lmms-lab/ai2d",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function ai2d_doc_to_visual at 0x7f4ce8beaca0>",
+            "doc_to_text": "<function ai2d_doc_to_text at 0x7f4ce8beaf70>",
+            "doc_to_target": "<function ai2d_doc_to_target at 0x7f4ce8bf1550>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "exact_match",
+                    "aggregation": "mean",
+                    "higher_is_better": true,
+                    "ignore_case": true,
+                    "ignore_punctuation": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 512,
+                "temperature": 0.0,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "filter_list": [
+                {
+                    "name": "flexible-extract",
+                    "filter": [
+                        {
+                            "function": "<class 'utils.MultiChoiceRegexFilter'>",
+                            "group_select": 0,
+                            "ignore_case": true,
+                            "ignore_punctuation": true,
+                            "regex_pattern": "([A-Z])\\."
+                        }
+                    ]
+                }
+            ],
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "prompt_format": "mcq",
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                },
+                "gpt4v": {
+                    "prompt_format": "mcq",
+                    "pre_prompt": "",
+                    "post_prompt": "\nAbove choices are given in {option}. {content} format.\nPlease answer with the option letter from the given choices directly."
+                },
+                "qwen_vl": {
+                    "prompt_format": "qa",
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                },
+                "xcomposer2_4khd": {
+                    "prompt_format": "mcq_xcomposer",
+                    "pre_prompt": "[UNUSED_TOKEN_146]user\nQuestion: ",
+                    "post_prompt": "[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\nThe answer is"
+                }
+            },
+            "model_specific_target_kwargs": {
+                "default": "mcq",
+                "qwen_vl": "qa"
+            }
+        },
+        "infovqa_val": {
+            "task": "infovqa_val",
+            "dataset_path": "lmms-lab/DocVQA",
+            "dataset_name": "InfographicVQA",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "validation",
+            "doc_to_visual": "<function infovqa_doc_to_visual at 0x7f4d1a345b80>",
+            "doc_to_text": "<function infovqa_doc_to_text at 0x7f4d1a345e50>",
+            "doc_to_target": "answers",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "anls",
+                    "aggregation": "mean",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 32,
+                "temperature": 0.0,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question using a single word or phrase."
+                }
+            }
+        },
+        "mmbench_en_dev": {
+            "task": "mmbench_en_dev",
+            "dataset_path": "lmms-lab/MMBench",
+            "dataset_name": "en",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "dev",
+            "doc_to_visual": "<function mmbench_doc_to_visual at 0x7f4d487d55e0>",
+            "doc_to_text": "<function mmbench_doc_to_text at 0x7f4d487d5af0>",
+            "doc_to_target": "answer",
+            "process_results": "<function mmbench_process_results at 0x7f4d487e1040>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "gpt_eval_score",
+                    "aggregation": "<function mmbench_aggregate_dev_results_eval at 0x7f4d4884be50>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "submission",
+                    "aggregation": "<function mmbench_aggregate_dev_results_submission at 0x7f4d487d53a0>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "ASSISTANT:"
+                ],
+                "max_new_tokens": 1024,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                }
+            },
+            "model_specific_generation_kwargs": {
+                "llava": {
+                    "image_aspect_ratio": "original"
+                }
+            }
+        },
+        "mme": {
+            "task": "mme",
+            "dataset_path": "lmms-lab/MME",
+            "dataset_kwargs": {
+                "token": false
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function mme_doc_to_visual at 0x7f4d498f4a60>",
+            "doc_to_text": "<function mme_doc_to_text at 0x7f4d48856310>",
+            "doc_to_target": "answer",
+            "process_results": "<function mme_process_results at 0x7f4d488568b0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "mme_percetion_score",
+                    "aggregation": "<function mme_aggregate_results at 0x7f4d48856dc0>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "mme_cognition_score",
+                    "aggregation": "<function mme_aggregate_results at 0x7f4d4885f280>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question using a single word or phrase."
+                },
+                "gpt4v": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question with Yes or No."
+                },
+                "qwen_vl": {
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                },
+                "otterhd": {
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                },
+                "xcomposer2_4khd": {
+                    "pre_prompt": "[UNUSED_TOKEN_146]user\n",
+                    "post_prompt": " Answer this question briefly[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
+                }
+            }
+        },
+        "mmerealworld_lite": {
+            "task": "mmerealworld_lite",
+            "dataset_path": "yifanzhang114/MME-RealWorld-lite-lmms-eval",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "train",
+            "doc_to_visual": "<function mme_realworld_doc_to_visual at 0x7f4ce9392e50>",
+            "doc_to_text": "<function mme_realworld_doc_to_text at 0x7f4ce9396940>",
+            "doc_to_target": "answer",
+            "process_results": "<function mme_realworld_process_results at 0x7f4ce939b5e0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "mme_realworld_score",
+                    "aggregation": "<function mme_realworld_aggregate_results at 0x7f4ce93a1160>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nSelect the best answer to the above multiple-choice question based on the image. Respond with only the letter (A, B, C, D, or E) of the correct option."
+                },
+                "gpt4v": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nSelect the best answer to the above multiple-choice question based on the image. Respond with only the letter (A, B, C, D, or E) of the correct option."
+                },
+                "xcomposer2_4khd": {
+                    "pre_prompt": "[UNUSED_TOKEN_146]user\n",
+                    "post_prompt": " Answer this question with A, B, C, or D.[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
+                }
+            }
+        },
+        "mmmu_val": {
+            "task": "mmmu_val",
+            "dataset_path": "lmms-lab/MMMU",
+            "test_split": "validation",
+            "doc_to_visual": "<function mmmu_doc_to_visual at 0x7f4cfc1328b0>",
+            "doc_to_text": "<function mmmu_doc_to_text at 0x7f4cfc0b85e0>",
+            "doc_to_target": "answer",
+            "process_results": "<function mmmu_process_results at 0x7f4cfbfe24c0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "mmmu_acc",
+                    "aggregation": "<function mmmu_aggregate_results at 0x7f4cfbf04430>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 128,
+                "until": [
+                    "\n\n"
+                ],
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_generation_kwargs": {
+                "llava": {
+                    "image_aspect_ratio": "original"
+                }
+            }
+        },
+        "mmstar": {
+            "task": "mmstar",
+            "dataset_path": "Lin-Chen/MMStar",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "val",
+            "doc_to_visual": "<function mmstar_doc_to_visual at 0x7f4ce8b47310>",
+            "doc_to_text": "<function mmstar_doc_to_text at 0x7f4ce8b47790>",
+            "doc_to_target": "answer",
+            "process_results": "<function mmstar_process_results at 0x7f4ce8b47ca0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "coarse perception",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b50160>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "fine-grained perception",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b50550>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "instance reasoning",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b50940>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "logical reasoning",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b50d30>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "science & technology",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b58160>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "math",
+                    "aggregation": "<function mmstar_aggregate_results at 0x7f4ce8b58550>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "\n\n"
+                ],
+                "do_sample": false
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer with the option's letter from the given choices directly"
+                }
+            }
+        },
+        "scienceqa_img": {
+            "task": "scienceqa_img",
+            "dataset_path": "lmms-lab/ScienceQA",
+            "dataset_name": "ScienceQA-IMG",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function sqa_doc_to_visual at 0x7f4d13f14d30>",
+            "doc_to_text": "<function sqa_doc_to_text at 0x7f4d13f14f70>",
+            "doc_to_target": "<function sqa_doc_to_target at 0x7f4d13ea53a0>",
+            "process_results": "<function sqa_process_results at 0x7f4d13ea5700>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "exact_match",
+                    "aggregation": "mean",
+                    "higher_is_better": true,
+                    "ignore_case": true,
+                    "ignore_punctuation": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ],
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "format": "default",
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                },
+                "qwen_vl": {
+                    "format": "qwen_vl"
+                },
+                "idefics2": {
+                    "format": "default",
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer:"
+                }
+            },
+            "model_specific_generation_kwargs": {
+                "llava": {
+                    "image_aspect_ratio": "original"
+                }
+            }
+        },
+        "seedbench_2_plus": {
+            "task": "seedbench_2_plus",
+            "dataset_path": "doolayer/SEED-Bench-2-Plus",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function seed_doc_to_visual at 0x7f4d11559e50>",
+            "doc_to_text": "<function seed_doc_to_text at 0x7f4d1157d4c0>",
+            "doc_to_target": "answer",
+            "process_results": "<function seed_process_result at 0x7f4d1157d9d0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "seedbench_2_plus_Chart",
+                    "aggregation": "<function seed_aggregation_result at 0x7f4d1157dee0>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_Map",
+                    "aggregation": "<function seed_aggregation_result at 0x7f4d115293a0>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_Web",
+                    "aggregation": "<function seed_aggregation_result at 0x7f4d11529820>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_all",
+                    "aggregation": "<function seed_aggregation_result at 0x7f4d11529ca0>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "ASSISTANT:"
+                ],
+                "max_new_tokens": 16,
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "llava": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                },
+                "gpt4V": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                },
+                "default": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                }
+            }
+        },
+        "textvqa_val": {
+            "task": "textvqa_val",
+            "dataset_path": "lmms-lab/textvqa",
+            "test_split": "validation",
+            "doc_to_visual": "<function textvqa_doc_to_visual at 0x7f4d15055940>",
+            "doc_to_text": "<function textvqa_doc_to_text at 0x7f4d15055dc0>",
+            "doc_to_target": "answer",
+            "process_results": "<function textvqa_process_results at 0x7f4d150790d0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "exact_match",
+                    "aggregation": "mean",
+                    "higher_is_better": true,
+                    "ignore_case": true,
+                    "ignore_punctuation": true
+                },
+                {
+                    "metric": "submission",
+                    "aggregation": "<function textvqa_aggregate_submissions at 0x7f4d15055700>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "ASSISTANT:"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question using a single word or phrase.",
+                    "ocr": false
+                },
+                "qwen_vl": {
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                }
+            }
+        }
+    },
+    "versions": {
+        "ai2d": "Yaml",
+        "infovqa_val": "Yaml",
+        "mmbench_en_dev": "Yaml",
+        "mme": "Yaml",
+        "mmerealworld_lite": "Yaml",
+        "mmmu_val": "Yaml",
+        "mmstar": "Yaml",
+        "scienceqa_img": "Yaml",
+        "seedbench_2_plus": "Yaml",
+        "textvqa_val": "Yaml"
+    },
+    "n-shot": {
+        "ai2d": 0,
+        "infovqa_val": 0,
+        "mmbench_en_dev": 0,
+        "mme": 0,
+        "mmerealworld_lite": 0,
+        "mmmu_val": 0,
+        "mmstar": 0,
+        "scienceqa_img": 0,
+        "seedbench_2_plus": 0,
+        "textvqa_val": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_share/checkpoint-13786,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
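Every per-task block in this results.json has the same shape: metric values keyed as "<metric>,<filter>", matching "_stderr" twins, and an "alias". A hedged sketch (the local filename is an assumption) for printing just the headline numbers:

import json

with open("results.json") as f:  # a local copy of the file above
    report = json.load(f)

for task, metrics in report["results"].items():
    for key, value in metrics.items():
        # Skip the alias, the stderr twins, and the submission placeholders.
        if key == "alias" or "_stderr," in key or key.startswith("submission"):
            continue
        print(f"{task:20s} {key:45s} {value}")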
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/scienceqa_img.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/seedbench_2_plus.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/mmbench_en_dev_results.json ADDED
@@ -0,0 +1 @@
+{"overall_acc": 0.6829896907216495, "category_acc": {"action_recognition": 0.8888888888888888, "attribute_comparison": 0.6590909090909091, "attribute_recognition": 0.8648648648648649, "celebrity_recognition": 0.7575757575757576, "function_reasoning": 0.8734177215189873, "future_prediction": 0.475, "identity_reasoning": 0.9555555555555556, "image_emotion": 0.76, "image_quality": 0.37735849056603776, "image_scene": 0.9519230769230769, "image_style": 0.9056603773584906, "image_topic": 0.8333333333333334, "nature_relation": 0.5833333333333334, "object_localization": 0.35802469135802467, "ocr": 0.6923076923076923, "physical_property_reasoning": 0.44, "physical_relation": 0.5833333333333334, "social_relation": 0.813953488372093, "spatial_relationship": 0.2, "structuralized_imagetext_understanding": 0.48717948717948717}, "l2_category_acc": {"attribute_reasoning": 0.7286432160804021, "coarse_perception": 0.793918918918919, "finegrained_perception (cross-instance)": 0.6013986013986014, "finegrained_perception (instance-level)": 0.6655290102389079, "logic_reasoning": 0.4830508474576271, "relation_reasoning": 0.6695652173913044}}
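One cross-check worth noting: the overall_acc above is the same quantity reported as gpt_eval_score in the results.json earlier in this commit, just on a 0-1 rather than 0-100 scale. A quick verification with the two values copied from those files:

# Values copied verbatim from the two JSON files in this commit.
overall_acc = 0.6829896907216495      # mmbench_en_dev_results.json
gpt_eval_score = 68.29896907216495    # results.json, mmbench_en_dev
assert abs(overall_acc * 100 - gpt_eval_score) < 1e-9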
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/mmbench_en_dev_results.xlsx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85a036f15518bb0e2e9dc7d42305d7910a51559a9b8fee67570ea67dd3117aec
+size 843234
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/submissions/textvqa_submission_2025-06-28-13-59-24.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1431_llava...l_mme_llava_model_args_d81bcd/textvqa_val.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a972d3a868c9d0fec3942e19e850909364165be767bd0ebb6b603dad4f619ea
+size 13182227
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/ocrbench.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 2 eval done
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/results.json ADDED
@@ -0,0 +1,67 @@
+{
+    "results": {
+        "ocrbench": {
+            "ocrbench_accuracy,none": 0.375,
+            "ocrbench_accuracy_stderr,none": "N/A",
+            "alias": "ocrbench"
+        }
+    },
+    "configs": {
+        "ocrbench": {
+            "task": "ocrbench",
+            "dataset_path": "echo840/OCRBench",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function ocrbench_doc_to_visual at 0x7f06efca6dc0>",
+            "doc_to_text": "<function ocrbench_doc_to_text at 0x7f06ef54d3a0>",
+            "doc_to_target": "answer",
+            "process_results": "<function ocrbench_process_results at 0x7f06ef54d700>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "ocrbench_accuracy",
+                    "aggregation": "<function ocrbench_aggregate_accuracy at 0x7f06ef54da60>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 128,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ]
+        }
+    },
+    "versions": {
+        "ocrbench": "Yaml"
+    },
+    "n-shot": {
+        "ocrbench": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_share/checkpoint-13786,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
sft/1M3/Full_smoe_share/checkpoint-13786/logs/0628_1500_llava...bench_llava_model_args_d81bcd/results/ocrbench_results.txt ADDED
@@ -0,0 +1,18 @@
+######################### OCRBench #############################
+Text Recognition(Total 300): 211
+---------------- Details of Recognition Score ------------------
+Regular Text Recognition(Total 50): 44
+Irregular Text Recognition(Total 50): 42
+Artistic Text Recognition(Total 50): 45
+Handwriting Recognition(Total 50): 18
+Digit String Recognition(Total 50): 36
+Non-Semantic Text Recognition(Total 50): 26
+----------------------------------------------------------------
+Scene Text-centric VQA(Total 200): 103
+----------------------------------------------------------------
+Doc-oriented VQA(Total 200): 20
+----------------------------------------------------------------
+Key Information Extraction(Total 200): 16
+Handwritten Mathematical Expression Recognition(Total 100): 25
+--------------------- Final Score ------------------------------
+Final Score(Total 1000): 375
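The breakdown above is internally consistent: the six recognition rows sum to the Text Recognition total, the five section totals sum to the Final Score, and Final Score / 1000 equals the ocrbench_accuracy of 0.375 reported in this run's results.json. A quick arithmetic check with the numbers copied from the table:

# All values copied verbatim from ocrbench_results.txt above.
recognition_rows = [44, 42, 45, 18, 36, 26]
sections = [211, 103, 20, 16, 25]  # Recognition, Scene VQA, Doc VQA, KIE, HMER
assert sum(recognition_rows) == 211
assert sum(sections) == 375
assert 375 / 1000 == 0.375  # matches ocrbench_accuracy in results.json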