DavidNguyen committed
Commit 4bc1216 · verified · 1 parent: 9e24805

de1b5e67dfafa8cfd2dcccac80916769b96c2546b53f3f867389ec98ef93d6ef

Files changed (26)
  1. .gitattributes +3 -0
  2. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmerealworld_lite.json +3 -0
  3. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmmu_pro_standard.json +0 -0
  4. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmmu_val.json +0 -0
  5. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmstar.json +0 -0
  6. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank0_metric_eval_done.txt +1 -0
  7. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank1_metric_eval_done.txt +1 -0
  8. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank2_metric_eval_done.txt +1 -0
  9. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank3_metric_eval_done.txt +1 -0
  10. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/results.json +729 -0
  11. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/scienceqa_img.json +0 -0
  12. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/seedbench_2_plus.json +0 -0
  13. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/mmbench_en_dev_results.json +1 -0
  14. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/mmbench_en_dev_results.xlsx +3 -0
  15. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/textvqa_submission_2025-07-17-19-29-53.json +0 -0
  16. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/textvqa_val.json +3 -0
  17. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2030_llava...bench_llava_model_args_82420a/rank0_metric_eval_done.txt +1 -0
  18. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2030_llava...bench_llava_model_args_82420a/results/ocrbench_results.txt +18 -0
  19. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank1_metric_eval_done.txt +1 -0
  20. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank2_metric_eval_done.txt +1 -0
  21. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank3_metric_eval_done.txt +1 -0
  22. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank0_metric_eval_done.txt +1 -0
  23. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank1_metric_eval_done.txt +1 -0
  24. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank2_metric_eval_done.txt +1 -0
  25. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank3_metric_eval_done.txt +1 -0
  26. sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/results/ocrbench_results.txt +18 -0
.gitattributes CHANGED
@@ -356,3 +356,6 @@ sft/665K36/Full_smoe_sharev3/checkpoint-20791/trainer_state.json filter=lfs diff
  sft/665K36/revise_Full_smoe_sharev3/checkpoint-20791/trainer_state.json filter=lfs diff=lfs merge=lfs -text
  sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/infovqa_val.json filter=lfs diff=lfs merge=lfs -text
  sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmbench_en_dev.json filter=lfs diff=lfs merge=lfs -text
+ sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmerealworld_lite.json filter=lfs diff=lfs merge=lfs -text
+ sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/mmbench_en_dev_results.xlsx filter=lfs diff=lfs merge=lfs -text
+ sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/textvqa_val.json filter=lfs diff=lfs merge=lfs -text
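
Note: each added line above maps one path to Git LFS (the pattern followed by the lfs filter/diff/merge attributes and -text). A minimal sketch of listing such patterns back out of a .gitattributes file, assuming whitespace-free patterns as here (the helper name is illustrative, not part of this repo):

import sys

def lfs_tracked_patterns(path):
    # Yield the path patterns a .gitattributes file routes through Git LFS.
    # Assumes patterns contain no whitespace, as in the entries added above.
    with open(path, encoding="utf-8") as fh:
        for raw in fh:
            fields = raw.split()
            # An LFS rule looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
            if fields and "filter=lfs" in fields[1:]:
                yield fields[0]

if __name__ == "__main__":
    for pattern in lfs_tracked_patterns(sys.argv[1] if len(sys.argv) > 1 else ".gitattributes"):
        print(pattern)
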
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmerealworld_lite.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02ff02798d3f02973ef781787110743f077761182c5110bc44c9cb38ec63698c
+ size 1994104355
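
Note: the three added lines form a standard Git LFS pointer (spec v1); the roughly 2 GB log itself lives in LFS storage, addressed by the sha256 oid. A minimal sketch of decoding such a pointer, with an illustrative helper name:

def read_lfs_pointer(path):
    # Parse a Git LFS pointer file into its key/value lines (version, oid, size).
    info = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                info[key] = value
    if not info.get("version", "").startswith("https://git-lfs.github.com/spec/v1"):
        raise ValueError(f"{path} is not a spec-v1 LFS pointer")
    return info["oid"].removeprefix("sha256:"), int(info["size"])

oid, size = read_lfs_pointer("mmerealworld_lite.json")  # path as committed here
print(f"sha256={oid}  size={size} bytes")
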
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmmu_pro_standard.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmmu_val.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/mmstar.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 0 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 1 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 2 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/rank3_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 3 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/results.json ADDED
@@ -0,0 +1,729 @@
+ {
+     "results": {
+         "ai2d": {
+             "exact_match,flexible-extract": 0.6544689119170984,
+             "exact_match_stderr,flexible-extract": 0.008558935628342142,
+             "alias": "ai2d"
+         },
+         "infovqa_val": {
+             "anls,none": 0.26847554444841126,
+             "anls_stderr,none": 0.008375057067642373,
+             "alias": "infovqa_val"
+         },
+         "mmbench_en_dev": {
+             "gpt_eval_score,none": 71.21993127147766,
+             "gpt_eval_score_stderr,none": "N/A",
+             "submission,none": null,
+             "submission_stderr,none": "N/A",
+             "alias": "mmbench_en_dev"
+         },
+         "mme": {
+             "mme_cognition_score,none": 321.7857142857143,
+             "mme_cognition_score_stderr,none": "N/A",
+             "mme_percetion_score,none": 1418.2278911564626,
+             "mme_percetion_score_stderr,none": "N/A",
+             "alias": "mme"
+         },
+         "mmerealworld_lite": {
+             "mme_realworld_score,none": 0.30484627410109433,
+             "mme_realworld_score_stderr,none": "N/A",
+             "alias": "mmerealworld_lite"
+         },
+         "mmmu_pro_standard": {
+             "mmmu_acc,none": 0.25896,
+             "mmmu_acc_stderr,none": "N/A",
+             "alias": "mmmu_pro_standard"
+         },
+         "mmmu_val": {
+             "mmmu_acc,none": 0.41222,
+             "mmmu_acc_stderr,none": "N/A",
+             "alias": "mmmu_val"
+         },
+         "mmstar": {
+             "coarse perception,none": 0.6918706627011363,
+             "coarse perception_stderr,none": "N/A",
+             "fine-grained perception,none": 0.3625644804716286,
+             "fine-grained perception_stderr,none": "N/A",
+             "instance reasoning,none": 0.5205089434882838,
+             "instance reasoning_stderr,none": "N/A",
+             "logical reasoning,none": 0.3660535284297661,
+             "logical reasoning_stderr,none": "N/A",
+             "math,none": 0.28080727078321305,
+             "math_stderr,none": "N/A",
+             "science & technology,none": 0.19842818316868963,
+             "science & technology_stderr,none": "N/A",
+             "alias": "mmstar"
+         },
+         "scienceqa_img": {
+             "exact_match,none": 0.7416955875061974,
+             "exact_match_stderr,none": 0.009748403485997436,
+             "alias": "scienceqa_img"
+         },
+         "seedbench_2_plus": {
+             "seedbench_2_plus_Chart,none": 0.5061728395061729,
+             "seedbench_2_plus_Chart_stderr,none": "N/A",
+             "seedbench_2_plus_all,none": 0.48967940272288096,
+             "seedbench_2_plus_all_stderr,none": "N/A",
+             "seedbench_2_plus_Web,none": 0.5075757575757576,
+             "seedbench_2_plus_Web_stderr,none": "N/A",
+             "seedbench_2_plus_Map,none": 0.45848822800495664,
+             "seedbench_2_plus_Map_stderr,none": "N/A",
+             "alias": "seedbench_2_plus"
+         },
+         "textvqa_val": {
+             "exact_match,none": 0.4134800003051758,
+             "exact_match_stderr,none": 0.006747678419171048,
+             "submission,none": null,
+             "submission_stderr,none": "N/A",
+             "alias": "textvqa_val"
+         }
+     },
+     "configs": {
+         "ai2d": {
+             "task": "ai2d",
+             "dataset_path": "lmms-lab/ai2d",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "test",
+             "doc_to_visual": "<function ai2d_doc_to_visual at 0x7f7862c37700>",
+             "doc_to_text": "<function ai2d_doc_to_text at 0x7f7862c379d0>",
+             "doc_to_target": "<function ai2d_doc_to_target at 0x7f7862c37f70>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "exact_match",
+                     "aggregation": "mean",
+                     "higher_is_better": true,
+                     "ignore_case": true,
+                     "ignore_punctuation": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 512,
+                 "temperature": 0.0,
+                 "do_sample": false,
+                 "until": [
+                     "\n\n"
+                 ]
+             },
+             "repeats": 1,
+             "filter_list": [
+                 {
+                     "name": "flexible-extract",
+                     "filter": [
+                         {
+                             "function": "<class 'utils.MultiChoiceRegexFilter'>",
+                             "group_select": 0,
+                             "ignore_case": true,
+                             "ignore_punctuation": true,
+                             "regex_pattern": "([A-Z])\\."
+                         }
+                     ]
+                 }
+             ],
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "prompt_format": "mcq",
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                 },
+                 "gpt4v": {
+                     "prompt_format": "mcq",
+                     "pre_prompt": "",
+                     "post_prompt": "\nAbove choices are given in {option}. {content} format.\nPlease answer with the option letter from the given choices directly."
+                 },
+                 "qwen_vl": {
+                     "prompt_format": "qa",
+                     "pre_prompt": "",
+                     "post_prompt": " Answer:"
+                 },
+                 "xcomposer2_4khd": {
+                     "prompt_format": "mcq_xcomposer",
+                     "pre_prompt": "[UNUSED_TOKEN_146]user\nQuestion: ",
+                     "post_prompt": "[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\nThe answer is"
+                 }
+             },
+             "model_specific_target_kwargs": {
+                 "default": "mcq",
+                 "qwen_vl": "qa"
+             }
+         },
+         "infovqa_val": {
+             "task": "infovqa_val",
+             "dataset_path": "lmms-lab/DocVQA",
+             "dataset_name": "InfographicVQA",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "validation",
+             "doc_to_visual": "<function infovqa_doc_to_visual at 0x7f78950b2790>",
+             "doc_to_text": "<function infovqa_doc_to_text at 0x7f78950b2a60>",
+             "doc_to_target": "answers",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "anls",
+                     "aggregation": "mean",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 32,
+                 "temperature": 0.0,
+                 "do_sample": false,
+                 "until": [
+                     "\n\n"
+                 ]
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer the question using a single word or phrase."
+                 }
+             }
+         },
+         "mmbench_en_dev": {
+             "task": "mmbench_en_dev",
+             "dataset_path": "lmms-lab/MMBench",
+             "dataset_name": "en",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "dev",
+             "doc_to_visual": "<function mmbench_doc_to_visual at 0x7f78c1ed0430>",
+             "doc_to_text": "<function mmbench_doc_to_text at 0x7f78c1ed0940>",
+             "doc_to_target": "answer",
+             "process_results": "<function mmbench_process_results at 0x7f78c1ed0e50>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "gpt_eval_score",
+                     "aggregation": "<function mmbench_aggregate_dev_results_eval at 0x7f78c1ec5ca0>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "submission",
+                     "aggregation": "<function mmbench_aggregate_dev_results_submission at 0x7f78c1ed01f0>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "until": [
+                     "ASSISTANT:"
+                 ],
+                 "max_new_tokens": 1024,
+                 "temperature": 0.0,
+                 "top_p": 1.0,
+                 "num_beams": 1,
+                 "do_sample": false,
+                 "image_aspect_ratio": "original"
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                 }
+             },
+             "model_specific_generation_kwargs": {
+                 "llava": {
+                     "image_aspect_ratio": "original"
+                 }
+             }
+         },
+         "mme": {
+             "task": "mme",
+             "dataset_path": "lmms-lab/MME",
+             "dataset_kwargs": {
+                 "token": false
+             },
+             "test_split": "test",
+             "doc_to_visual": "<function mme_doc_to_visual at 0x7f78c39a88b0>",
+             "doc_to_text": "<function mme_doc_to_text at 0x7f78c1f4f160>",
+             "doc_to_target": "answer",
+             "process_results": "<function mme_process_results at 0x7f78c1f4f700>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "mme_percetion_score",
+                     "aggregation": "<function mme_aggregate_results at 0x7f78c1f4fc10>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "mme_cognition_score",
+                     "aggregation": "<function mme_aggregate_results at 0x7f78c1f560d0>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 16,
+                 "temperature": 0.0,
+                 "top_p": 1.0,
+                 "num_beams": 1,
+                 "do_sample": false,
+                 "until": [
+                     "\n\n"
+                 ]
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer the question using a single word or phrase."
+                 },
+                 "gpt4v": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer the question with Yes or No."
+                 },
+                 "qwen_vl": {
+                     "pre_prompt": "",
+                     "post_prompt": " Answer:"
+                 },
+                 "otterhd": {
+                     "pre_prompt": "",
+                     "post_prompt": " Answer:"
+                 },
+                 "xcomposer2_4khd": {
+                     "pre_prompt": "[UNUSED_TOKEN_146]user\n",
+                     "post_prompt": " Answer this question briefly[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
+                 }
+             }
+         },
+         "mmerealworld_lite": {
+             "task": "mmerealworld_lite",
+             "dataset_path": "yifanzhang114/MME-RealWorld-lite-lmms-eval",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "train",
+             "doc_to_visual": "<function mme_realworld_doc_to_visual at 0x7f78633e08b0>",
+             "doc_to_text": "<function mme_realworld_doc_to_text at 0x7f78633e53a0>",
+             "doc_to_target": "answer",
+             "process_results": "<function mme_realworld_process_results at 0x7f78633e9040>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "mme_realworld_score",
+                     "aggregation": "<function mme_realworld_aggregate_results at 0x7f78633e9b80>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 16,
+                 "temperature": 0.0,
+                 "top_p": 1.0,
+                 "num_beams": 1,
+                 "do_sample": false,
+                 "until": [
+                     "\n\n"
+                 ]
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nSelect the best answer to the above multiple-choice question based on the image. Respond with only the letter (A, B, C, D, or E) of the correct option."
+                 },
+                 "gpt4v": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nSelect the best answer to the above multiple-choice question based on the image. Respond with only the letter (A, B, C, D, or E) of the correct option."
+                 },
+                 "xcomposer2_4khd": {
+                     "pre_prompt": "[UNUSED_TOKEN_146]user\n",
+                     "post_prompt": " Answer this question with A, B, C, or D.[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
+                 }
+             }
+         },
+         "mmmu_pro_standard": {
+             "task": "mmmu_pro_standard",
+             "dataset_path": "MMMU/MMMU_Pro",
+             "dataset_name": "standard (10 options)",
+             "test_split": "test",
+             "doc_to_visual": "<function mmmu_pro_doc_to_visual at 0x7f78c1e9d790>",
+             "doc_to_text": "<function mmmu_pro_doc_to_text at 0x7f78c1ea9550>",
+             "doc_to_target": "{{answer}}",
+             "process_results": "<function mmmu_pro_process_results at 0x7f78c1eae4c0>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "mmmu_acc",
+                     "aggregation": "<function mmmu_pro_aggregate_results at 0x7f78c1eb6430>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 256,
+                 "until": [
+                     "\n\n"
+                 ]
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": {
+                 "version": 0.0,
+                 "interleaved_format": false
+             },
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "Answer with the option letter from the given choices directly."
+                 }
+             }
+         },
+         "mmmu_val": {
+             "task": "mmmu_val",
+             "dataset_path": "lmms-lab/MMMU",
+             "test_split": "validation",
+             "doc_to_visual": "<function mmmu_doc_to_visual at 0x7f7875483700>",
+             "doc_to_text": "<function mmmu_doc_to_text at 0x7f7875367430>",
+             "doc_to_target": "answer",
+             "process_results": "<function mmmu_process_results at 0x7f787534c310>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "mmmu_acc",
+                     "aggregation": "<function mmmu_aggregate_results at 0x7f7875267280>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 128,
+                 "until": [
+                     "\n\n"
+                 ],
+                 "image_aspect_ratio": "original"
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_generation_kwargs": {
+                 "llava": {
+                     "image_aspect_ratio": "original"
+                 }
+             }
+         },
+         "mmstar": {
+             "task": "mmstar",
+             "dataset_path": "Lin-Chen/MMStar",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "val",
+             "doc_to_visual": "<function mmstar_doc_to_visual at 0x7f7862b91d30>",
+             "doc_to_text": "<function mmstar_doc_to_text at 0x7f7862b991f0>",
+             "doc_to_target": "answer",
+             "process_results": "<function mmstar_process_results at 0x7f7862b99700>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "coarse perception",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b99b80>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "fine-grained perception",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b99f70>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "instance reasoning",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b1f3a0>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "logical reasoning",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b1f790>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "science & technology",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b1fb80>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "math",
+                     "aggregation": "<function mmstar_aggregate_results at 0x7f7862b1ff70>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "until": [
+                     "\n\n"
+                 ],
+                 "do_sample": false
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer with the option's letter from the given choices directly"
+                 }
+             }
+         },
+         "scienceqa_img": {
+             "task": "scienceqa_img",
+             "dataset_path": "lmms-lab/ScienceQA",
+             "dataset_name": "ScienceQA-IMG",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "test",
+             "doc_to_visual": "<function sqa_doc_to_visual at 0x7f788e521dc0>",
+             "doc_to_text": "<function sqa_doc_to_text at 0x7f788e4f2040>",
+             "doc_to_target": "<function sqa_doc_to_target at 0x7f788e4f2430>",
+             "process_results": "<function sqa_process_results at 0x7f788e4f2790>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "exact_match",
+                     "aggregation": "mean",
+                     "higher_is_better": true,
+                     "ignore_case": true,
+                     "ignore_punctuation": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "max_new_tokens": 16,
+                 "temperature": 0.0,
+                 "do_sample": false,
+                 "until": [
+                     "\n\n"
+                 ],
+                 "image_aspect_ratio": "original"
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "format": "default",
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer with the option's letter from the given choices directly."
+                 },
+                 "qwen_vl": {
+                     "format": "qwen_vl"
+                 },
+                 "idefics2": {
+                     "format": "default",
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer:"
+                 }
+             },
+             "model_specific_generation_kwargs": {
+                 "llava": {
+                     "image_aspect_ratio": "original"
+                 }
+             }
+         },
+         "seedbench_2_plus": {
+             "task": "seedbench_2_plus",
+             "dataset_path": "doolayer/SEED-Bench-2-Plus",
+             "dataset_kwargs": {
+                 "token": true
+             },
+             "test_split": "test",
+             "doc_to_visual": "<function seed_doc_to_visual at 0x7f788a10b310>",
+             "doc_to_text": "<function seed_doc_to_text at 0x7f788a10b940>",
+             "doc_to_target": "answer",
+             "process_results": "<function seed_process_result at 0x7f788a10be50>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "seedbench_2_plus_Chart",
+                     "aggregation": "<function seed_aggregation_result at 0x7f787821c3a0>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "seedbench_2_plus_Map",
+                     "aggregation": "<function seed_aggregation_result at 0x7f787821c820>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "seedbench_2_plus_Web",
+                     "aggregation": "<function seed_aggregation_result at 0x7f787821cca0>",
+                     "higher_is_better": true
+                 },
+                 {
+                     "metric": "seedbench_2_plus_all",
+                     "aggregation": "<function seed_aggregation_result at 0x7f787824b160>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "until": [
+                     "ASSISTANT:"
+                 ],
+                 "max_new_tokens": 16,
+                 "image_aspect_ratio": "original"
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "metadata": [
+                 {
+                     "version": 0.0
+                 }
+             ],
+             "model_specific_prompt_kwargs": {
+                 "llava": {
+                     "img_token": "<image>",
+                     "post_prompt": "Answer with the option's letter from the given choices directly."
+                 },
+                 "gpt4V": {
+                     "img_token": "<image>",
+                     "post_prompt": "Answer with the option's letter from the given choices directly."
+                 },
+                 "default": {
+                     "img_token": "<image>",
+                     "post_prompt": "Answer with the option's letter from the given choices directly."
+                 }
+             }
+         },
+         "textvqa_val": {
+             "task": "textvqa_val",
+             "dataset_path": "lmms-lab/textvqa",
+             "test_split": "validation",
+             "doc_to_visual": "<function textvqa_doc_to_visual at 0x7f788fa299d0>",
+             "doc_to_text": "<function textvqa_doc_to_text at 0x7f788fa29e50>",
+             "doc_to_target": "answer",
+             "process_results": "<function textvqa_process_results at 0x7f788f899160>",
+             "description": "",
+             "target_delimiter": " ",
+             "fewshot_delimiter": "\n\n",
+             "metric_list": [
+                 {
+                     "metric": "exact_match",
+                     "aggregation": "mean",
+                     "higher_is_better": true,
+                     "ignore_case": true,
+                     "ignore_punctuation": true
+                 },
+                 {
+                     "metric": "submission",
+                     "aggregation": "<function textvqa_aggregate_submissions at 0x7f788fa29790>",
+                     "higher_is_better": true
+                 }
+             ],
+             "output_type": "generate_until",
+             "generation_kwargs": {
+                 "until": [
+                     "ASSISTANT:"
+                 ]
+             },
+             "repeats": 1,
+             "should_decontaminate": false,
+             "model_specific_prompt_kwargs": {
+                 "default": {
+                     "pre_prompt": "",
+                     "post_prompt": "\nAnswer the question using a single word or phrase.",
+                     "ocr": false
+                 },
+                 "qwen_vl": {
+                     "pre_prompt": "",
+                     "post_prompt": " Answer:"
+                 }
+             }
+         }
+     },
+     "versions": {
+         "ai2d": "Yaml",
+         "infovqa_val": "Yaml",
+         "mmbench_en_dev": "Yaml",
+         "mme": "Yaml",
+         "mmerealworld_lite": "Yaml",
+         "mmmu_pro_standard": "Yaml",
+         "mmmu_val": "Yaml",
+         "mmstar": "Yaml",
+         "scienceqa_img": "Yaml",
+         "seedbench_2_plus": "Yaml",
+         "textvqa_val": "Yaml"
+     },
+     "n-shot": {
+         "ai2d": 0,
+         "infovqa_val": 0,
+         "mmbench_en_dev": 0,
+         "mme": 0,
+         "mmerealworld_lite": 0,
+         "mmmu_pro_standard": 0,
+         "mmmu_val": 0,
+         "mmstar": 0,
+         "scienceqa_img": 0,
+         "seedbench_2_plus": 0,
+         "textvqa_val": 0
+     },
+     "model_configs": {
+         "model": "llava",
+         "model_args": "pretrained=/cm/archive/namnv78_new/revise_checkpoints/Xphi35-siglip224/SMOE/665K36/revise_Full_smoe_sharev3/checkpoint-12477,conv_template=phi35",
+         "batch_size": "1",
+         "device": null,
+         "limit": null,
+         "bootstrap_iters": 100000,
+         "gen_kwargs": ""
+     },
+     "git_hash": "289c7fe5"
+ }
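
Note: this results.json follows the lmms-eval layout: a "results" map keyed by task, whose metric values are numbers, "N/A" stderr strings, or null submissions, plus an "alias", followed by "configs", "versions", "n-shot", "model_configs", and "git_hash". A minimal sketch that prints only the numeric scores per task, assuming the file sits in the current directory:

import json

with open("results.json", encoding="utf-8") as fh:
    report = json.load(fh)

for task, metrics in report["results"].items():
    # Keep only numeric metrics; drop "alias", "N/A" stderrs, and null submissions.
    scores = {name: value for name, value in metrics.items()
              if isinstance(value, (int, float))}
    print(f"{task}: {scores}")
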
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/scienceqa_img.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/seedbench_2_plus.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/mmbench_en_dev_results.json ADDED
@@ -0,0 +1 @@
+ {"overall_acc": 0.7121993127147767, "category_acc": {"action_recognition": 0.9074074074074074, "attribute_comparison": 0.7045454545454546, "attribute_recognition": 0.8513513513513513, "celebrity_recognition": 0.7676767676767676, "function_reasoning": 0.8607594936708861, "future_prediction": 0.55, "identity_reasoning": 0.9777777777777777, "image_emotion": 0.84, "image_quality": 0.4528301886792453, "image_scene": 0.9615384615384616, "image_style": 0.9245283018867925, "image_topic": 0.8611111111111112, "nature_relation": 0.5625, "object_localization": 0.49382716049382713, "ocr": 0.6923076923076923, "physical_property_reasoning": 0.6, "physical_relation": 0.4583333333333333, "social_relation": 0.813953488372093, "spatial_relationship": 0.26666666666666666, "structuralized_imagetext_understanding": 0.4230769230769231}, "l2_category_acc": {"attribute_reasoning": 0.7889447236180904, "coarse_perception": 0.831081081081081, "finegrained_perception (cross-instance)": 0.6433566433566433, "finegrained_perception (instance-level)": 0.7030716723549488, "logic_reasoning": 0.4661016949152542, "relation_reasoning": 0.6347826086956522}}
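
Note: the submission summary above holds one overall accuracy plus per-category ("category_acc") and coarser ("l2_category_acc") breakdowns. A minimal sketch ranking categories from weakest to strongest, path assumed as committed:

import json

with open("mmbench_en_dev_results.json", encoding="utf-8") as fh:
    summary = json.load(fh)

print(f"overall_acc = {summary['overall_acc']:.4f}")
for category, acc in sorted(summary["category_acc"].items(), key=lambda item: item[1]):
    print(f"{acc:.3f}  {category}")
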
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/mmbench_en_dev_results.xlsx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3eae64c51364e19fe9591076a3878e0882d544ae87b51040157b5428d3528835
+ size 865116
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/submissions/textvqa_submission_2025-07-17-19-29-53.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2000_llava...l_mme_llava_model_args_82420a/textvqa_val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2784ee378d820efb47d9c9d42a268758638ebfa76232aff787fb54feb5998952
+ size 13138199
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2030_llava...bench_llava_model_args_82420a/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 0 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2030_llava...bench_llava_model_args_82420a/results/ocrbench_results.txt ADDED
@@ -0,0 +1,18 @@
+ ######################### OCRBench #############################
+ Text Recognition(Total 300): 181
+ ---------------- Details of Recognition Score ------------------
+ Regular Text Recognition(Total 50): 47
+ Irregular Text Recognition(Total 50): 41
+ Artistic Text Recognition(Total 50): 44
+ Handwriting Recognition(Total 50): 24
+ Digit String Recognition(Total 50): 11
+ Non-Semantic Text Recognition(Total 50): 14
+ ----------------------------------------------------------------
+ Scene Text-centric VQA(Total 200): 113
+ ----------------------------------------------------------------
+ Doc-oriented VQA(Total 200): 22
+ ----------------------------------------------------------------
+ Key Information Extraction(Total 200): 10
+ Handwritten Mathematical Expression Recognition(Total 100): 0
+ --------------------- Final Score ------------------------------
+ Final Score(Total 1000): 326
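
Note: every score row in this report has the shape "Name(Total N): score", separated by "#" banners and "-" rules. A minimal sketch of a parser for that shape (the regex is an assumption inferred from the rows above; file name as committed):

import re

# Matches e.g. "Text Recognition(Total 300): 181"; skips "#" banners and "-" rules.
ROW = re.compile(r"^(?P<name>[^#(-][^(]*)\(Total (?P<total>\d+)\):\s*(?P<score>\d+)\s*$")

with open("ocrbench_results.txt", encoding="utf-8") as fh:
    for line in fh:
        match = ROW.match(line.strip())
        if match:
            print(f"{match['name'].strip():55s} {match['score']:>4s} / {match['total']}")
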
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 1 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 2 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2031_llava...bench_llava_model_args_82420a/rank3_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 3 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 0 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 1 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 2 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/rank3_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+ rank 3 eval done
sft/665K36/revise_Full_smoe_sharev3/checkpoint-12477/logs/0717_2043_llava...bench_llava_model_args_82420a/results/ocrbench_results.txt ADDED
@@ -0,0 +1,18 @@
+ ######################### OCRBench #############################
+ Text Recognition(Total 300): 181
+ ---------------- Details of Recognition Score ------------------
+ Regular Text Recognition(Total 50): 47
+ Irregular Text Recognition(Total 50): 41
+ Artistic Text Recognition(Total 50): 44
+ Handwriting Recognition(Total 50): 24
+ Digit String Recognition(Total 50): 11
+ Non-Semantic Text Recognition(Total 50): 14
+ ----------------------------------------------------------------
+ Scene Text-centric VQA(Total 200): 113
+ ----------------------------------------------------------------
+ Doc-oriented VQA(Total 200): 22
+ ----------------------------------------------------------------
+ Key Information Extraction(Total 200): 10
+ Handwritten Mathematical Expression Recognition(Total 100): 0
+ --------------------- Final Score ------------------------------
+ Final Score(Total 1000): 326