DavidNguyen committed
Commit 21ca734 · verified · 1 parent: 5a24a6f

621e39e0a3f90b3e1ad3c6a69fbca3ac88ed2bccf1a4c66e51c018ac56d4b3ec

Files changed (33)
  1. .gitattributes +2 -0
  2. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/infovqa_val.json +3 -0
  3. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/mmmu_pro_standard.json +0 -0
  4. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/mmmu_pro_vision.json +0 -0
  5. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank0_metric_eval_done.txt +1 -0
  6. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank1_metric_eval_done.txt +1 -0
  7. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank2_metric_eval_done.txt +1 -0
  8. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank3_metric_eval_done.txt +1 -0
  9. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/realworldqa.json +0 -0
  10. sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/results.json +245 -0
  11. sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/gqa.json +3 -0
  12. sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/rank0_metric_eval_done.txt +1 -0
  13. sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/results.json +79 -0
  14. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/gpt_response/hallusion_output_vd_model.json +0 -0
  15. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/gpt_response/hallusion_output_vs_model.json +0 -0
  16. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/hallusion_bench_image.json +0 -0
  17. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/mathvista_testmini.json +0 -0
  18. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/rank0_metric_eval_done.txt +1 -0
  19. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/rank1_metric_eval_done.txt +1 -0
  20. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/results.json +146 -0
  21. sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/submissions/mathvista_testmini_scores.json +0 -0
  22. sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/chartqa.json +0 -0
  23. sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/rank0_metric_eval_done.txt +1 -0
  24. sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/rank1_metric_eval_done.txt +1 -0
  25. sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/results.json +168 -0
  26. sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/seedbench_2_plus.json +0 -0
  27. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/ocrbench.json +0 -0
  28. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank0_metric_eval_done.txt +1 -0
  29. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank1_metric_eval_done.txt +1 -0
  30. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank2_metric_eval_done.txt +1 -0
  31. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank3_metric_eval_done.txt +1 -0
  32. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/results.json +67 -0
  33. sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/results/ocrbench_results.txt +18 -0
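All 33 files land under sft/1M3/Full_smoe_tcmoe/logs/, one directory per eval run. To pull just these logs instead of cloning the whole repo, a snapshot filter along these lines works (a sketch; the repo id below is a placeholder, since it is not shown on this page):

from huggingface_hub import snapshot_download

# repo_id is hypothetical -- substitute the dataset repo this commit belongs to.
local_dir = snapshot_download(
    repo_id="DavidNguyen/<repo-name>",
    repo_type="dataset",
    allow_patterns=["sft/1M3/Full_smoe_tcmoe/logs/**"],
)
print(local_dir)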
.gitattributes CHANGED
@@ -207,3 +207,5 @@ sft/1M3/Full_smoe_tcmoe/logs/0626_0357_llava..._pope_llava_model_args_59313d/mme
 sft/1M3/Full_smoe_tcmoe/logs/0626_0357_llava..._pope_llava_model_args_59313d/pope.json filter=lfs diff=lfs merge=lfs -text
 sft/1M3/Full_smoe_tcmoe/logs/0626_0357_llava..._pope_llava_model_args_59313d/submissions/mmbench_en_dev_results.xlsx filter=lfs diff=lfs merge=lfs -text
 sft/1M3/Full_smoe_tcmoe/logs/0626_0357_llava..._pope_llava_model_args_59313d/textvqa_val.json filter=lfs diff=lfs merge=lfs -text
+sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/infovqa_val.json filter=lfs diff=lfs merge=lfs -text
+sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/gqa.json filter=lfs diff=lfs merge=lfs -text
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/infovqa_val.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfa57a8969ea335579f36927fa1b04d59c4ada57f64ee5c223cf86770a85275d
+size 576440094
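The infovqa_val.json blob itself stays in LFS; the three added lines above are the Git LFS pointer (version, oid, size -- here a ~576 MB file). A minimal pointer reader, assuming only the key-value format of the spec lines shown above:

def read_lfs_pointer(path):
    # Each pointer line is "key value": version, oid, size.
    with open(path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
    fields["size"] = int(fields["size"])  # e.g. 576440094 bytes
    return fields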
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/mmmu_pro_standard.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/mmmu_pro_vision.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 2 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/rank3_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 3 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/realworldqa.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_0430_llava...a_val_llava_model_args_59313d/results.json ADDED
@@ -0,0 +1,245 @@
+{
+    "results": {
+        "infovqa_val": {
+            "anls,none": 0.2995358800428418,
+            "anls_stderr,none": 0.00865642075096775,
+            "alias": "infovqa_val"
+        },
+        "mmmu_pro": {
+            "mmmu_acc,none": 0.18931,
+            "mmmu_acc_stderr,none": 0.03685532629908108,
+            "alias": "mmmu_pro"
+        },
+        "mmmu_pro_standard": {
+            "mmmu_acc,none": 0.26301,
+            "mmmu_acc_stderr,none": "N/A",
+            "alias": " - mmmu_pro_standard"
+        },
+        "mmmu_pro_vision": {
+            "mmmu_acc,none": 0.11561,
+            "mmmu_acc_stderr,none": "N/A",
+            "alias": " - mmmu_pro_vision"
+        },
+        "realworldqa": {
+            "exact_match,flexible-extract": 0.515032679738562,
+            "exact_match_stderr,flexible-extract": 0.018081187561622823,
+            "alias": "realworldqa"
+        }
+    },
+    "groups": {
+        "mmmu_pro": {
+            "mmmu_acc,none": 0.18931,
+            "mmmu_acc_stderr,none": 0.03685532629908108,
+            "alias": "mmmu_pro"
+        }
+    },
+    "configs": {
+        "infovqa_val": {
+            "task": "infovqa_val",
+            "dataset_path": "lmms-lab/DocVQA",
+            "dataset_name": "InfographicVQA",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "validation",
+            "doc_to_visual": "<function infovqa_doc_to_visual at 0x7fc9fa5d69d0>",
+            "doc_to_text": "<function infovqa_doc_to_text at 0x7fc9fa5d6ca0>",
+            "doc_to_target": "answers",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "anls",
+                    "aggregation": "mean",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 32,
+                "temperature": 0.0,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question using a single word or phrase."
+                }
+            }
+        },
+        "mmmu_pro_standard": {
+            "task": "mmmu_pro_standard",
+            "dataset_path": "MMMU/MMMU_Pro",
+            "dataset_name": "standard (10 options)",
+            "test_split": "test",
+            "doc_to_visual": "<function mmmu_pro_doc_to_visual at 0x7fca25805790>",
+            "doc_to_text": "<function mmmu_pro_doc_to_text at 0x7fca25810550>",
+            "doc_to_target": "{{answer}}",
+            "process_results": "<function mmmu_pro_process_results at 0x7fca258194c0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "mmmu_acc",
+                    "aggregation": "<function mmmu_pro_aggregate_results at 0x7fca25823430>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 256,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": {
+                "version": 0.0,
+                "interleaved_format": false
+            },
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "Answer with the option letter from the given choices directly."
+                }
+            }
+        },
+        "mmmu_pro_vision": {
+            "task": "mmmu_pro_vision",
+            "dataset_path": "MMMU/MMMU_Pro",
+            "dataset_name": "vision",
+            "test_split": "test",
+            "doc_to_visual": "<function mmmu_pro_doc_to_visual at 0x7fca257f2ca0>",
+            "doc_to_text": "Answer with the option letter from the given choices directly.",
+            "doc_to_target": "{{answer}}",
+            "process_results": "<function mmmu_pro_process_results at 0x7fca257f9b80>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "mmmu_acc",
+                    "aggregation": "<function mmmu_pro_aggregate_results at 0x7fca25800af0>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 256,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": {
+                "version": 0.0,
+                "interleaved_format": false
+            }
+        },
+        "realworldqa": {
+            "task": "realworldqa",
+            "dataset_path": "lmms-lab/RealWorldQA",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function realworldqa_doc_to_visual at 0x7fc9f1671a60>",
+            "doc_to_text": "<function realworldqa_doc_to_text at 0x7fc9f168a0d0>",
+            "doc_to_target": "answer",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "exact_match",
+                    "aggregation": "mean",
+                    "higher_is_better": true,
+                    "ignore_case": true,
+                    "ignore_punctuation": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "filter_list": [
+                {
+                    "name": "flexible-extract",
+                    "filter": [
+                        {
+                            "function": "<class 'utils.NumberWordsToDigitsFilter'>"
+                        },
+                        {
+                            "function": "<class 'utils.MultiChoiceRegexFilter'>",
+                            "group_select": 0,
+                            "ignore_case": true,
+                            "ignore_punctuation": true,
+                            "regex_pattern": "(\\([A-Z]\\))"
+                        }
+                    ]
+                }
+            ],
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": ""
+                },
+                "gpt4v": {
+                    "pre_prompt": "",
+                    "post_prompt": ""
+                },
+                "xcomposer2_4khd": {
+                    "pre_prompt": "[UNUSED_TOKEN_146]user\nQuestion: ",
+                    "post_prompt": "[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\nThe answer is"
+                }
+            }
+        }
+    },
+    "versions": {
+        "infovqa_val": "Yaml",
+        "mmmu_pro": "N/A",
+        "mmmu_pro_standard": "Yaml",
+        "mmmu_pro_vision": "Yaml",
+        "realworldqa": "Yaml"
+    },
+    "n-shot": {
+        "infovqa_val": 0,
+        "mmmu_pro": 0,
+        "mmmu_pro_standard": 0,
+        "mmmu_pro_vision": 0,
+        "realworldqa": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_tcmoe,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
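A quick plausibility check on the realworldqa entry above (the 765-question RealWorldQA test split size is an assumption, not recorded in the log, and lmms-eval's own aggregation may differ):

import math

n = 765                         # assumed RealWorldQA test split size
p = 0.515032679738562           # exact_match,flexible-extract from above
print(round(p * n))             # 394 -> the score corresponds to 394/765 correct
print(math.sqrt(p * (1 - p) / (n - 1)))  # ~0.018081, matching the reported stderr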
sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/gqa.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcc4c5e650a5f87ecc3e3934c40f696d319a0a49ed56237d9b4a66604c28b234
+size 38569629
sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1139_llava_v1.5_gqa_llava_model_args_59313d/results.json ADDED
@@ -0,0 +1,79 @@
+{
+    "results": {
+        "gqa": {
+            "exact_match,none": 0.3945778343138814,
+            "exact_match_stderr,none": 0.004358198135704839,
+            "alias": "gqa"
+        }
+    },
+    "configs": {
+        "gqa": {
+            "task": "gqa",
+            "dataset_path": "lmms-lab/GQA",
+            "dataset_name": "testdev_balanced_instructions",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "testdev",
+            "doc_to_visual": "<function gqa_doc_to_visual at 0x7ff78ec2f8b0>",
+            "doc_to_text": "<function gqa_doc_to_text at 0x7ff78eb83b80>",
+            "doc_to_target": "answer",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "exact_match",
+                    "aggregation": "mean",
+                    "higher_is_better": true,
+                    "ignore_case": true,
+                    "ignore_punctuation": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question using a single word or phrase."
+                },
+                "qwen_vl": {
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                }
+            }
+        }
+    },
+    "versions": {
+        "gqa": "Yaml"
+    },
+    "n-shot": {
+        "gqa": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_tcmoe,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
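Each run in this commit writes its own results.json; a small helper can flatten them into one score table (a sketch assuming the logs layout shown in this diff):

import json
from pathlib import Path

root = Path("sft/1M3/Full_smoe_tcmoe/logs")
for results_file in sorted(root.glob("*/results.json")):
    run = results_file.parent.name
    data = json.loads(results_file.read_text())
    for task, metrics in data["results"].items():
        for name, value in metrics.items():
            if name != "alias":
                print(f"{run}\t{task}\t{name}\t{value}")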
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/gpt_response/hallusion_output_vd_model.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/gpt_response/hallusion_output_vs_model.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/hallusion_bench_image.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/mathvista_testmini.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/results.json ADDED
@@ -0,0 +1,146 @@
+{
+    "results": {
+        "hallusion_bench_image": {
+            "aAcc,none": 45.0053,
+            "aAcc_stderr,none": "N/A",
+            "fAcc,none": 17.6301,
+            "fAcc_stderr,none": "N/A",
+            "qAcc,none": 14.9451,
+            "qAcc_stderr,none": "N/A",
+            "alias": "hallusion_bench_image"
+        },
+        "mathvista_testmini": {
+            "gpt_eval_score,none": 37.8,
+            "gpt_eval_score_stderr,none": "N/A",
+            "alias": "mathvista_testmini"
+        }
+    },
+    "configs": {
+        "hallusion_bench_image": {
+            "task": "hallusion_bench_image",
+            "dataset_path": "lmms-lab/HallusionBench",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "image",
+            "doc_to_visual": "<function hb_doc_to_visual at 0x7fdbc65ef0d0>",
+            "doc_to_text": "<function hb_doc_to_text at 0x7fdbc65ef820>",
+            "doc_to_target": "gt_answer_details",
+            "process_results": "<function hb_process_results at 0x7fdbc6563040>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "aAcc",
+                    "aggregation": "<function hb_aggregation_result_aAcc at 0x7fdbc65639d0>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "qAcc",
+                    "aggregation": "<function hb_aggregation_result_qAcc at 0x7fdbc6467040>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "fAcc",
+                    "aggregation": "<function hb_aggregation_result_fAcc at 0x7fdbc6467820>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 128,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": ""
+                }
+            }
+        },
+        "mathvista_testmini": {
+            "task": "mathvista_testmini",
+            "dataset_path": "AI4Math/MathVista",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "testmini",
+            "doc_to_visual": "<function mathvista_doc_to_visual at 0x7fdb9bbb0af0>",
+            "doc_to_text": "<function mathvista_doc_to_text at 0x7fdb9b7a91f0>",
+            "doc_to_target": "answer",
+            "process_results": "<function mathvista_process_results at 0x7fdb9b7b18b0>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "gpt_eval_score",
+                    "aggregation": "<function mathvista_aggregate_results at 0x7fdb9b7bbf70>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "ASSISTANT:"
+                ],
+                "max_new_tokens": 1024,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "shot_type": "format-prompt",
+                    "shot": 0,
+                    "use_caption": false,
+                    "use_ocr": false
+                },
+                "phi3v": {
+                    "shot_type": "solution"
+                }
+            },
+            "model_specific_generation_kwargs": {
+                "llava": {
+                    "image_aspect_ratio": "original"
+                }
+            }
+        }
+    },
+    "versions": {
+        "hallusion_bench_image": "Yaml",
+        "mathvista_testmini": "Yaml"
+    },
+    "n-shot": {
+        "hallusion_bench_image": 0,
+        "mathvista_testmini": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_tcmoe,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
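In the hallusion_bench_image block, HallusionBench reports three granularities: aAcc over individual answers, qAcc counting a question group as correct only if every variant in it is answered correctly, and fAcc doing the same per figure (hence qAcc <= fAcc <= aAcc above). A sketch of that grouping logic, with illustrative field names rather than the dataset's actual schema:

from collections import defaultdict

def grouped_acc(records, key):
    # records: [{"question_group": ..., "figure": ..., "correct": bool}, ...]
    groups = defaultdict(list)
    for r in records:
        groups[r[key]].append(r["correct"])
    # a group counts only if every answer in it is correct
    return 100.0 * sum(all(v) for v in groups.values()) / len(groups)

def hallusion_metrics(records):
    a_acc = 100.0 * sum(r["correct"] for r in records) / len(records)  # aAcc
    return a_acc, grouped_acc(records, "question_group"), grouped_acc(records, "figure")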
sft/1M3/Full_smoe_tcmoe/logs/0626_1211_llava...image_llava_model_args_59313d/submissions/mathvista_testmini_scores.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/chartqa.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/results.json ADDED
@@ -0,0 +1,168 @@
+{
+    "results": {
+        "chartqa": {
+            "relaxed_overall,none": 0.1644,
+            "relaxed_overall_stderr,none": 0.007414246045132662,
+            "relaxed_human_split,none": 0.1864,
+            "relaxed_human_split_stderr,none": 0.011019127412601738,
+            "relaxed_augmented_split,none": 0.1424,
+            "relaxed_augmented_split_stderr,none": 0.009888175403493184,
+            "alias": "chartqa"
+        },
+        "seedbench_2_plus": {
+            "seedbench_2_plus_Chart,none": 0.5049382716049383,
+            "seedbench_2_plus_Chart_stderr,none": "N/A",
+            "seedbench_2_plus_all,none": 0.5072463768115942,
+            "seedbench_2_plus_all_stderr,none": "N/A",
+            "seedbench_2_plus_Web,none": 0.5545454545454546,
+            "seedbench_2_plus_Web_stderr,none": "N/A",
+            "seedbench_2_plus_Map,none": 0.4708798017348203,
+            "seedbench_2_plus_Map_stderr,none": "N/A",
+            "alias": "seedbench_2_plus"
+        }
+    },
+    "configs": {
+        "chartqa": {
+            "task": "chartqa",
+            "dataset_path": "lmms-lab/ChartQA",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function chartqa_doc_to_visual at 0x7fa788965d30>",
+            "doc_to_text": "<function chartqa_doc_to_text at 0x7fa78896c790>",
+            "doc_to_target": "answer",
+            "process_results": "<function chartqa_process_results at 0x7fa78896ca60>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "relaxed_overall",
+                    "aggregation": "mean",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "relaxed_human_split",
+                    "aggregation": "mean",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "relaxed_augmented_split",
+                    "aggregation": "mean",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 16,
+                "temperature": 0.0,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "default": {
+                    "pre_prompt": "",
+                    "post_prompt": "\nAnswer the question with a single word."
+                },
+                "qwen_vl": {
+                    "pre_prompt": "",
+                    "post_prompt": " Answer:"
+                }
+            }
+        },
+        "seedbench_2_plus": {
+            "task": "seedbench_2_plus",
+            "dataset_path": "doolayer/SEED-Bench-2-Plus",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function seed_doc_to_visual at 0x7fa7b10d8ca0>",
+            "doc_to_text": "<function seed_doc_to_text at 0x7fa7b1101310>",
+            "doc_to_target": "answer",
+            "process_results": "<function seed_process_result at 0x7fa7b1101820>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "seedbench_2_plus_Chart",
+                    "aggregation": "<function seed_aggregation_result at 0x7fa7b1101d30>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_Map",
+                    "aggregation": "<function seed_aggregation_result at 0x7fa7b0ea31f0>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_Web",
+                    "aggregation": "<function seed_aggregation_result at 0x7fa7b0ea3670>",
+                    "higher_is_better": true
+                },
+                {
+                    "metric": "seedbench_2_plus_all",
+                    "aggregation": "<function seed_aggregation_result at 0x7fa7b0ea3af0>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "until": [
+                    "ASSISTANT:"
+                ],
+                "max_new_tokens": 16,
+                "image_aspect_ratio": "original"
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ],
+            "model_specific_prompt_kwargs": {
+                "llava": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                },
+                "gpt4V": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                },
+                "default": {
+                    "img_token": "<image>",
+                    "post_prompt": "Answer with the option's letter from the given choices directly."
+                }
+            }
+        }
+    },
+    "versions": {
+        "chartqa": "Yaml",
+        "seedbench_2_plus": "Yaml"
+    },
+    "n-shot": {
+        "chartqa": 0,
+        "seedbench_2_plus": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_tcmoe,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
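The chartqa relaxed_* metrics follow ChartQA's relaxed accuracy: a numeric prediction counts as correct within 5% relative error, while anything non-numeric needs an exact case-insensitive match. A sketch of the rule (not the exact chartqa_process_results implementation referenced above):

def relaxed_match(pred: str, target: str, tol: float = 0.05) -> bool:
    try:
        p, t = float(pred), float(target)
        return p == t if t == 0 else abs(p - t) / abs(t) <= tol
    except ValueError:
        return pred.strip().lower() == target.strip().lower()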
sft/1M3/Full_smoe_tcmoe/logs/0626_1233_llava..._plus_llava_model_args_59313d/seedbench_2_plus.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/ocrbench.json ADDED
The diff for this file is too large to render. See raw diff
 
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank0_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 0 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank1_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 1 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank2_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 2 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/rank3_metric_eval_done.txt ADDED
@@ -0,0 +1 @@
+rank 3 eval done
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/results.json ADDED
@@ -0,0 +1,67 @@
+{
+    "results": {
+        "ocrbench": {
+            "ocrbench_accuracy,none": 0.391,
+            "ocrbench_accuracy_stderr,none": "N/A",
+            "alias": "ocrbench"
+        }
+    },
+    "configs": {
+        "ocrbench": {
+            "task": "ocrbench",
+            "dataset_path": "echo840/OCRBench",
+            "dataset_kwargs": {
+                "token": true
+            },
+            "test_split": "test",
+            "doc_to_visual": "<function ocrbench_doc_to_visual at 0x7f9ffcd0ac10>",
+            "doc_to_text": "<function ocrbench_doc_to_text at 0x7f9ffc5251f0>",
+            "doc_to_target": "answer",
+            "process_results": "<function ocrbench_process_results at 0x7f9ffc525550>",
+            "description": "",
+            "target_delimiter": " ",
+            "fewshot_delimiter": "\n\n",
+            "metric_list": [
+                {
+                    "metric": "ocrbench_accuracy",
+                    "aggregation": "<function ocrbench_aggregate_accuracy at 0x7f9ffc5258b0>",
+                    "higher_is_better": true
+                }
+            ],
+            "output_type": "generate_until",
+            "generation_kwargs": {
+                "max_new_tokens": 128,
+                "temperature": 0.0,
+                "top_p": 1.0,
+                "num_beams": 1,
+                "do_sample": false,
+                "until": [
+                    "\n\n"
+                ]
+            },
+            "repeats": 1,
+            "should_decontaminate": false,
+            "metadata": [
+                {
+                    "version": 0.0
+                }
+            ]
+        }
+    },
+    "versions": {
+        "ocrbench": "Yaml"
+    },
+    "n-shot": {
+        "ocrbench": 0
+    },
+    "model_configs": {
+        "model": "llava",
+        "model_args": "pretrained=/cm/archive/namnv78/checkpoints/Xphi35-siglip224/SMOE/1M3/Full_smoe_tcmoe,conv_template=phi35",
+        "batch_size": "1",
+        "device": null,
+        "limit": null,
+        "bootstrap_iters": 100000,
+        "gen_kwargs": ""
+    },
+    "git_hash": "289c7fe5"
+}
sft/1M3/Full_smoe_tcmoe/logs/0626_1418_llava...bench_llava_model_args_59313d/results/ocrbench_results.txt ADDED
@@ -0,0 +1,18 @@
+######################### OCRBench #############################
+Text Recognition(Total 300): 204
+---------------- Details of Recognition Score ------------------
+Regular Text Recognition(Total 50): 44
+Irregular Text Recognition(Total 50): 40
+Artistic Text Recognition(Total 50): 43
+Handwriting Recognition(Total 50): 18
+Digit String Recognition(Total 50): 37
+Non-Semantic Text Recognition(Total 50): 22
+----------------------------------------------------------------
+Scene Text-centric VQA(Total 200): 110
+----------------------------------------------------------------
+Doc-oriented VQA(Total 200): 25
+----------------------------------------------------------------
+Key Information Extraction(Total 200): 18
+Handwritten Mathematical Expression Recognition(Total 100): 34
+--------------------- Final Score ------------------------------
+Final Score(Total 1000): 391
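The breakdown is internally consistent: the final score is the plain sum of the five section scores, and dividing by the 1000-point total reproduces ocrbench_accuracy = 0.391 from results.json above.

sections = {
    "Text Recognition": 204,
    "Scene Text-centric VQA": 110,
    "Doc-oriented VQA": 25,
    "Key Information Extraction": 18,
    "Handwritten Mathematical Expression Recognition": 34,
}
total = sum(sections.values())
print(total, total / 1000)  # 391 0.391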