XuYifanXUXU committed on
Commit
c03d13f
·
1 Parent(s): 67da17c

Upload fullset of TinyBenchMark

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ *.pdf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .DS_Store
Fullset/TinyBench/0_overview.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f3f3ba36b6191cc8193edaf5cf27b1b7df44235566d9c41473d5bbc152a2414
3
+ size 1148375
Fullset/TinyBench/2401.18059v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01319e2e6e6ecfd504ee1bbda41d023f9e060bd3505ab3c161baa1d6d696cf75
3
+ size 2547113
Fullset/TinyBench/MMMU_website.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86edded839fa8cc8cf00ec852a73faaa4f8540c618dab1fbee65ee30e5d29d32
3
+ size 13328644
Fullset/TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:538faebdf5ff106946523de5cf66c2a55dfd5f5cf1fa77558bf8bfad88ba4a86
3
+ size 658530
Fullset/TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1d2be648c1752c4ec86c398ae3e199fd0976a61c4fcd4060a5497750425b461
3
+ size 2734242
Fullset/TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11fc8671985f4a810ed635ba2df7ae29c2fc0632de230b2a02386b2fbad08103
3
+ size 525251
Fullset/TinyBench/SAO-StudentSupport_Guidebook-Content.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7515cd264f0a33a782169d938a291e9ec4fc81a094a30a4ecf951d4cfa7ff336
3
+ size 8813581
Fullset/TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:737e16a1aaac8ba108e7dc92d542960b93c07fe723593920b528557394e4986b
3
+ size 4472720
Fullset/TinyBench/avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40e8219add5c83ebe97ce47a3a8e4bce3dd1c27ecc592f897dba0f3a4a6ec831
3
+ size 1960940
Fullset/TinyBench/fd76bbefe469561966e5387aa709c482.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fc1c32fe534bc49d025c8699be4421a2abba60c71bdc360121c1f974e36a819
3
+ size 1586692
Fullset/TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e63ef520ce513cb19132f57d6efe8a59cf180a5a2aee4bbff1c4718804b06caa
3
+ size 3021284
Fullset/TinyBench/llava_onevision.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5322de2d2185bd6c215efe8a5e5e692dd2ed9df5de4f3e476ca45675e9ffd6d1
3
+ size 14945322
Fullset/TinyBench/new_Art_Theory_5.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7442188e8fde347d572ac7d609f2d79e0581929711f134ad122193b9a34cc97
3
+ size 23524699
Fullset/TinyBench/new_Computer_Science_1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4416715e29577e7cc2c36531905957fce3e178e9ecfbcbc4b9a37a820dd15852
3
+ size 16993661
Fullset/TinyBench/new_Psychology_5.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5916aaeb9077a1717ab5797ff5dec2eb15a37ff028088d0876af6ec2b08d02c5
3
+ size 23364249
Fullset/TinyBench/test_Computer_Science_351.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f32b78bc57b8fc60822be24093bc4440bad946bdbbb759d7649c1bb43f2ffeaa
3
+ size 14532580
Fullset/TinyBench/validation_Mechanical_Engineering_18.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95a49022d7d76257ba7ca3d78485c0391f28ccf4780c44052d31bfa6fc347707
3
+ size 36046593
Fullset/TinyBench/videommmu_paper.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c14d3e34611bd69fbf15ae83d61063d8522398ef5f4432d25aff7f6bec4ae3b
3
+ size 10858331
Fullset/TinyBench_files.xlsx ADDED
Binary file (8.12 kB). View file
 
Fullset/qa_table_perception_v00.json ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "doc_id": [
5
+ "TinyBench/llava_onevision.pdf"
6
+ ],
7
+ "file_type": "paper pdf",
8
+ "question": "What is LLaVA-OV-72B zero-shot accuracy result on ActNet-QA benchmark?",
9
+ "question_type": "fact",
10
+ "evidence_type": "text",
11
+ "answer": "63.3",
12
+ "content_domain": "Academic paper",
13
+ "Comment": ""
14
+ },
15
+ {
16
+ "id": "1",
17
+ "doc_id": [
18
+ "TinyBench/videommmu_paper.pdf"
19
+ ],
20
+ "file_type": "paper pdf",
21
+ "question": "Which proprietary model achieves the highest Perception score among the models tested in the videommmu_paper.pdf paper?",
22
+ "question_type": "fact",
23
+ "evidence_type": "text",
24
+ "answer": "Claude-3.5-Sonnet achieves the highest Perception score among proprietary models with 72.00%",
25
+ "content_domain": "Academic paper",
26
+ "Comment": ""
27
+ },
28
+ {
29
+ "id": "2",
30
+ "doc_id": [
31
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
32
+ ],
33
+ "file_type": "document pdf",
34
+ "question": "At NTU, what is the total number of days of Recess week in the first semester of the Calendar year of 2016-2017?",
35
+ "question_type": "fact",
36
+ "evidence_type": "text",
37
+ "answer": "5 days",
38
+ "content_domain": "University guidebook",
39
+ "Comment": ""
40
+ },
41
+ {
42
+ "id": "3",
43
+ "doc_id": [
44
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
45
+ ],
46
+ "file_type": "document pdf",
47
+ "question": "What is the total number of days of Recess week and Vacation days at NTU in 2018?",
48
+ "question_type": "fact",
49
+ "evidence_type": "text",
50
+ "answer": "No Answer",
51
+ "content_domain": "University guidebook",
52
+ "Comment": ""
53
+ },
54
+ {
55
+ "id": "4",
56
+ "doc_id": [
57
+ "TinyBench/videommmu_paper.pdf"
58
+ ],
59
+ "file_type": "paper pdf",
60
+ "question": "What is TeleMM's score on the MMMU(val) on the leaderboard?",
61
+ "question_type": "fact",
62
+ "evidence_type": "text",
63
+ "answer": "61.40%",
64
+ "content_domain": "Academic paper",
65
+ "Comment": ""
66
+ }
67
+ ]
Fullset/qa_v0.json ADDED
@@ -0,0 +1,1770 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "doc_id": [
5
+ "TinyBench/videommmu_paper.pdf"
6
+ ],
7
+ "file_type": "paper pdf",
8
+ "question": "What are novelties of Video-MMMU dataset",
9
+ "question_type": "summary",
10
+ "evidence_type": "text",
11
+ "answer": "1) Knowledge-Intensive Video Collection: The dataset includes 300 expert-level videos across 6 professional disciplines, covering 30 subjects. 2) Knowledge Acquisition-Based QA Design: Each video contains three QA pairs corresponding to the stages of knowledge acquisition—Perception (extracting key information), Comprehension (grasping concepts), and Adaptation (applying knowledge to new contexts). 3) Quantitative Knowledge Assessment: they introduce a delta knowledge metric to measure performance gains on practice exam questions after watching the videos, enabling quantitative evaluation of LMMs' ability to learn and apply new knowledge.",
12
+ "content_domain": "Academic paper",
13
+ "Comment": ""
14
+ },
15
+ {
16
+ "id": "1",
17
+ "doc_id": [
18
+ "TinyBench/videommmu_paper.pdf"
19
+ ],
20
+ "file_type": "paper pdf",
21
+ "question": "How QA pairs are categorized in Video-MMMU?",
22
+ "question_type": "summary",
23
+ "evidence_type": "text",
24
+ "answer": "Perception Questions assess the ability to extract information from videos via: 1) Optical Character Recognition (OCR) and 2) Automatic Speech Recognition (ASR). Comprehension Questions evaluate understanding through: 1) Concept Comprehension (CC) and 2) Problem-Solving Strategy Comprehension (PSC). Adaptation Questions test the ability to apply knowledge to new scenarios via: 1) Case Study Analysis (CSA) and 2) Problem-Solving Strategy Adaptation (PSA).",
25
+ "content_domain": "Academic paper",
26
+ "Comment": ""
27
+ },
28
+ {
29
+ "id": "2",
30
+ "doc_id": [
31
+ "TinyBench/videommmu_paper.pdf"
32
+ ],
33
+ "file_type": "paper pdf",
34
+ "question": "Are the QAs in Video-MMMU fully annotated by human?",
35
+ "question_type": "fact",
36
+ "evidence_type": "text",
37
+ "answer": "No. Perception and Comprehension questions are manually created. For Adaptation, questions in Science, Engineering, Medicine, and Business are sourced from MMMU/MMMU-Pro, while Art and Humanities remain manual.",
38
+ "content_domain": "Academic paper",
39
+ "Comment": ""
40
+ },
41
+ {
42
+ "id": "3",
43
+ "doc_id": [
44
+ "TinyBench/videommmu_paper.pdf"
45
+ ],
46
+ "file_type": "paper pdf",
47
+ "question": "Does Video-MMMU have the longest video length among all benchmarks",
48
+ "question_type": "reasoning",
49
+ "evidence_type": "table",
50
+ "answer": "No. 506.2s. The benchmark with longest video length is Video-MME, which is 1017.9s.",
51
+ "content_domain": "Academic paper",
52
+ "Comment": ""
53
+ },
54
+ {
55
+ "id": "4",
56
+ "doc_id": [
57
+ "TinyBench/videommmu_paper.pdf"
58
+ ],
59
+ "file_type": "paper pdf",
60
+ "question": "Does Claude-3.5-Sonnet achieve the highest delta score on Video-MMMU?",
61
+ "question_type": "reasoning",
62
+ "evidence_type": "table",
63
+ "answer": "No. 11.4. smaller than GPT-4o's 15.6%.",
64
+ "content_domain": "Academic paper",
65
+ "Comment": ""
66
+ },
67
+ {
68
+ "id": "5",
69
+ "doc_id": [
70
+ "TinyBench/videommmu_paper.pdf"
71
+ ],
72
+ "file_type": "paper pdf",
73
+ "question": "According to Figure 5 in Video-MMMU, does Claude-3.5-Sonnet achieve the highest delta score on Video-MMMU?",
74
+ "question_type": "reasoning",
75
+ "evidence_type": "table",
76
+ "answer": "No. 11.4. smaller than GPT-4o's 15.6%.",
77
+ "content_domain": "Academic paper",
78
+ "Comment": ""
79
+ },
80
+ {
81
+ "id": "6",
82
+ "doc_id": [
83
+ "TinyBench/videommmu_paper.pdf"
84
+ ],
85
+ "file_type": "paper pdf",
86
+ "question": "How is delta knowledge calculated? What is the formula?",
87
+ "question_type": "fact",
88
+ "evidence_type": "formula",
89
+ "answer": "Δ_knowledge = \\frac{Acc_{post} - Acc_{pre}}{100\\% - Acc_{pre}} \\times 100\\% \\quad \\text{where } Acc_{pre} \\text{ and } Acc_{post} \\text{ represent the accuracy before and after watching the video, respectively.}",
90
+ "content_domain": "Academic paper",
91
+ "Comment": ""
92
+ },
93
+ {
94
+ "id": "7",
95
+ "doc_id": [
96
+ "TinyBench/videommmu_paper.pdf"
97
+ ],
98
+ "file_type": "paper pdf",
99
+ "question": "What is the SOTA model performance on Video-MMMU dataset",
100
+ "question_type": "reasoning",
101
+ "evidence_type": "table",
102
+ "answer": "65.78%, Claude-3.5-Sonnet",
103
+ "content_domain": "Academic paper",
104
+ "Comment": ""
105
+ },
106
+ {
107
+ "id": "8",
108
+ "doc_id": [
109
+ "TinyBench/videommmu_paper.pdf"
110
+ ],
111
+ "file_type": "paper pdf",
112
+ "question": "What is GPT-4o's Overall score on video-mmmu?",
113
+ "question_type": "fact",
114
+ "evidence_type": "table",
115
+ "answer": "61.22",
116
+ "content_domain": "Academic paper",
117
+ "Comment": ""
118
+ },
119
+ {
120
+ "id": "9",
121
+ "doc_id": [
122
+ "TinyBench/videommmu_paper.pdf"
123
+ ],
124
+ "file_type": "paper pdf",
125
+ "question": "How much error in video-mmmu are perception error?",
126
+ "question_type": "fact",
127
+ "evidence_type": "text",
128
+ "answer": "15%",
129
+ "content_domain": "Academic paper",
130
+ "Comment": ""
131
+ },
132
+ {
133
+ "id": "10",
134
+ "doc_id": [
135
+ "TinyBench/videommmu_paper.pdf"
136
+ ],
137
+ "file_type": "paper pdf",
138
+ "question": "Explain the error analysis in video-mmmu?",
139
+ "question_type": "summary",
140
+ "evidence_type": "text",
141
+ "answer": "Method Selection Error (8%): The model chooses the wrong approach, failing to apply the correct strategy demonstrated in the video. Method Adaptation Error (64%): The model recalls and understands the video-taught method but struggles to adapt it to new scenarios. For example, it correctly applies DFS in a simple tree but fails in a complex graph with cycles, highlighting its difficulty in transferring learned methods across contexts. Question Misreading Error (15%): The model misinterprets question details, such as numerical values or conditions, unrelated to its knowledge application. Other Errors: Includes Refuse to Answer (4%), where the model expresses uncertainty; Annotation Error (4%), due to inaccurate labeling; and Answer Extraction Error (5%), where answers fail to be extracted from longer responses.",
142
+ "content_domain": "Academic paper",
143
+ "Comment": ""
144
+ },
145
+ {
146
+ "id": "11",
147
+ "doc_id": [
148
+ "TinyBench/videommmu_paper.pdf"
149
+ ],
150
+ "file_type": "paper pdf",
151
+ "question": "How much video-mmmu error is Refuse to answer error?",
152
+ "question_type": "fact",
153
+ "evidence_type": "text",
154
+ "answer": "4%",
155
+ "content_domain": "Academic paper",
156
+ "Comment": ""
157
+ },
158
+ {
159
+ "id": "12",
160
+ "doc_id": [
161
+ "TinyBench/videommmu_paper.pdf"
162
+ ],
163
+ "file_type": "paper pdf",
164
+ "question": "How much video-mmmu error is Annotation error?",
165
+ "question_type": "fact",
166
+ "evidence_type": "text",
167
+ "answer": "4%",
168
+ "content_domain": "Academic paper",
169
+ "Comment": ""
170
+ },
171
+ {
172
+ "id": "13",
173
+ "doc_id": [
174
+ "TinyBench/videommmu_paper.pdf"
175
+ ],
176
+ "file_type": "paper pdf",
177
+ "question": "How much video-mmmu error is Answer Extraction error?",
178
+ "question_type": "fact",
179
+ "evidence_type": "text",
180
+ "answer": "5%",
181
+ "content_domain": "Academic paper",
182
+ "Comment": ""
183
+ },
184
+ {
185
+ "id": "14",
186
+ "doc_id": [
187
+ "TinyBench/videommmu_paper.pdf"
188
+ ],
189
+ "file_type": "paper pdf",
190
+ "question": "What are the findings of the knowledge acquisition experiment?",
191
+ "question_type": "summary",
192
+ "evidence_type": "text",
193
+ "answer": "The **Knowledge Acquisition** analysis reveals a **Δ_knowledge gap** of 33.1% for humans vs. 15.6% for top models (GPT-4o), with some models even declining post-video. **Wrong-to-Right Rate** shows moderate gains for models (e.g., Gemini-1.5-Pro: 29.5%), but **Right-to-Wrong Rate** is high (e.g., LongVA: 55%), indicating models struggle to retain prior knowledge while integrating new information. In contrast, humans achieve a higher **Wrong-to-Right Rate** (40.4%) and lower **Right-to-Wrong Rate** (10.7%), demonstrating superior learning stability.",
194
+ "content_domain": "Academic paper",
195
+ "Comment": ""
196
+ },
197
+ {
198
+ "id": "15",
199
+ "doc_id": [
200
+ "TinyBench/videommmu_paper.pdf"
201
+ ],
202
+ "file_type": "paper pdf",
203
+ "question": "What is Aria's Overall score with transcript on video-mmmu?",
204
+ "question_type": "fact",
205
+ "evidence_type": "figure",
206
+ "answer": "53.67%",
207
+ "content_domain": "Academic paper",
208
+ "Comment": ""
209
+ },
210
+ {
211
+ "id": "16",
212
+ "doc_id": [
213
+ "TinyBench/videommmu_paper.pdf"
214
+ ],
215
+ "file_type": "paper pdf",
216
+ "question": "Explain the example of DFS to help me understand the method adaptation error.",
217
+ "question_type": "summary",
218
+ "evidence_type": "figure",
219
+ "answer": "The video teaches DFS principles, but the adaptation question applies them to a complex graph with cycles. Before the video, both Claude and Humans misfocused on cycles. Afterward, both grasped the core principle, but Claude failed to adapt it correctly, while Humans successfully applied it. This highlights the difficulty of method adaptation in new scenarios.",
220
+ "content_domain": "Academic paper",
221
+ "Comment": ""
222
+ },
223
+ {
224
+ "id": "17",
225
+ "doc_id": [
226
+ "TinyBench/videommmu_paper.pdf"
227
+ ],
228
+ "file_type": "paper pdf",
229
+ "question": "How many video-mmmu Questions are ASR?",
230
+ "question_type": "fact",
231
+ "evidence_type": "figure",
232
+ "answer": "23",
233
+ "content_domain": "Academic paper",
234
+ "Comment": ""
235
+ },
236
+ {
237
+ "id": "18",
238
+ "doc_id": [
239
+ "TinyBench/videommmu_paper.pdf"
240
+ ],
241
+ "file_type": "paper pdf",
242
+ "question": "What is Question Misreading error? Can you explain with an example case of GPT-4o?",
243
+ "question_type": "summary",
244
+ "evidence_type": "figure",
245
+ "answer": "The video explains how to determine the work function (∅) in Photoelectric Effect Graphs by identifying the y-intercept, eliminating the need for formulas. Before the video: • Both humans and the model relied on formulas, leading to incorrect answers. After the video: • The model correctly recognized the y-intercept method but misread the graph, identifying -2.0 instead of -1.5 due to a mistaken x-intercept assumption. • Humans accurately identified the y-intercept and found the correct answer (1.5). This case illustrates a Question Misreading Error by GPT-4o, where it applied the right method but misinterpreted the graph.",
246
+ "content_domain": "Academic paper",
247
+ "Comment": ""
248
+ },
249
+ {
250
+ "id": "19",
251
+ "doc_id": [
252
+ "TinyBench/videommmu_paper.pdf"
253
+ ],
254
+ "file_type": "paper pdf",
255
+ "question": "Can you explain the wrong-to-right example of the video lecture '23 tree' in Video-MMMU?",
256
+ "question_type": "summary",
257
+ "evidence_type": "figure",
258
+ "answer": "This case shows the model learning from a 2-3 tree lecture to correct its misunderstanding of insertion and reorganization. The video explains node insertion cases and restructuring rules, which the adaptation question tests. Before the video, the model: • Misjudged insertion effects on the root • Misunderstood reorganization rules • Incorrectly identified only S4 as true After the video, the model: • Recognized node splits and reorganization • Applied Case 2 principles correctly • Identified S1 and S4 as true This demonstrates successful knowledge acquisition, as the model corrected its understanding and applied the learned principles accurately.",
259
+ "content_domain": "Academic paper",
260
+ "Comment": ""
261
+ },
262
+ {
263
+ "id": "20",
264
+ "doc_id": [
265
+ "TinyBench/videommmu_paper.pdf"
266
+ ],
267
+ "file_type": "paper pdf",
268
+ "question": "What is the number of Video-MMMU questions where audio might help?",
269
+ "question_type": "reasoning",
270
+ "evidence_type": "figure",
271
+ "answer": "Art: 16, Business: 28, Medicine: 32, Science: 23, Humanities: 26, Engineering: 29. Total = 154.",
272
+ "content_domain": "Academic paper",
273
+ "Comment": ""
274
+ },
275
+ {
276
+ "id": "21",
277
+ "doc_id": [
278
+ "TinyBench/videommmu_paper.pdf"
279
+ ],
280
+ "file_type": "paper pdf",
281
+ "question": "How many questions are in the Comprehension track of Video-MMMU?",
282
+ "question_type": "reasoning",
283
+ "evidence_type": "figure",
284
+ "answer": "300",
285
+ "content_domain": "Academic paper",
286
+ "Comment": ""
287
+ },
288
+ {
289
+ "id": "22",
290
+ "doc_id": [
291
+ "TinyBench/videommmu_paper.pdf"
292
+ ],
293
+ "file_type": "paper pdf",
294
+ "question": "How many questions are in the Art discipline of Video-MMMU?",
295
+ "question_type": "reasoning",
296
+ "evidence_type": "figure + Multi-page",
297
+ "answer": "Video: 7% * 300 = 21. Each video has 3 questions from 3 tracks. So 21 * 3 = 63.",
298
+ "content_domain": "Academic paper",
299
+ "Comment": ""
300
+ },
301
+ {
302
+ "id": "23",
303
+ "doc_id": [
304
+ "TinyBench/videommmu_paper.pdf"
305
+ ],
306
+ "file_type": "paper pdf",
307
+ "question": "What is the performance gap between Human and model in the main experiment of Video-MMMU?",
308
+ "question_type": "reasoning",
309
+ "evidence_type": "table",
310
+ "answer": "74.44-65.78=8.66",
311
+ "content_domain": "Academic paper",
312
+ "Comment": ""
313
+ },
314
+ {
315
+ "id": "24",
316
+ "doc_id": [
317
+ "TinyBench/new_Art_Theory_5.mp4"
318
+ ],
319
+ "file_type": "video tutorial",
320
+ "question": "What are differences between Rococo and Baroque Art?",
321
+ "question_type": "ocr + fact",
322
+ "evidence_type": "text",
323
+ "answer": "abandonment of symmetry; embraced graceful lines and curves; flowers as ornamentation; Chinese & Japanese motifs; warm, pastel colours",
324
+ "content_domain": "short_tutorial_video",
325
+ "Comment": ""
326
+ },
327
+ {
328
+ "id": "25",
329
+ "doc_id": [
330
+ "TinyBench/new_Psychology_5.mp4"
331
+ ],
332
+ "file_type": "video tutorial",
333
+ "question": "What are the 3 advantages of naturalistic observation?",
334
+ "question_type": "ocr + fact",
335
+ "evidence_type": "text",
336
+ "answer": "High ecological validity. If undisclosed, there are no demand characteristics. Reflects real-life situations.",
337
+ "content_domain": "short_tutorial_video",
338
+ "Comment": ""
339
+ },
340
+ {
341
+ "id": "26",
342
+ "doc_id": [
343
+ "TinyBench/validation_Mechanical_Engineering_18.mp4"
344
+ ],
345
+ "file_type": "video tutorial",
346
+ "question": "I remember I have a video about Rigid body. What is the angular acceleration for Question 1?",
347
+ "question_type": "ocr + fact",
348
+ "evidence_type": "text",
349
+ "answer": "14.71rad/s2",
350
+ "content_domain": "short_tutorial_video",
351
+ "Comment": ""
352
+ },
353
+ {
354
+ "id": "27",
355
+ "doc_id": [
356
+ "TinyBench/validation_Mechanical_Engineering_18.mp4"
357
+ ],
358
+ "file_type": "video tutorial",
359
+ "question": "I remember I have a video about Rigid body. I do not understand problem 1 in the video. Can you explain problem 1 in detail for me?",
360
+ "question_type": "ocr + summary",
361
+ "evidence_type": "text",
362
+ "answer": "The problem involves a uniform 24-kg plate released from rest, requiring calculations for its initial angular acceleration and the reactions at pin A. ### Breakdown: 1. **Setup:** - The plate is initially at rest with known geometry. - Point A is the pinned location where reactions are determined. 2. **Diagrams:** - **Free Body Diagram:** Includes gravitational force (24 kg g) and support reactions. - **Kinetic Diagram:** Shows acceleration effects due to applied forces. 3. **Equations & Calculations:** - **Angular Acceleration:** Derived using equations of motion (moment of inertia and external forces). - **Reactions at Pin A:** Found by resolving forces and applying equilibrium conditions (∑F = 0, ∑M = I alpha). 4. **Execution:** - Apply rigid body dynamics equations (e.g., \\( \\sum M = I \\cdot \\alpha \\)). - Solve for unknowns using given values. 5. **Conclusion:** - Compute initial angular acceleration. - Determine horizontal and vertical reaction forces at pin A.",
363
+ "content_domain": "short_tutorial_video",
364
+ "Comment": ""
365
+ },
366
+ {
367
+ "id": "28",
368
+ "doc_id": [
369
+ "TinyBench/validation_Mechanical_Engineering_18.mp4"
370
+ ],
371
+ "file_type": "video tutorial",
372
+ "question": "What is Case 3 of Insertion in 2-3 Tree? Can you explain it?",
373
+ "question_type": "ocr + summary",
374
+ "evidence_type": "text",
375
+ "answer": "Insert in a node with two data elements whose parents also contain two data elements.",
376
+ "content_domain": "short_tutorial_video",
377
+ "Comment": ""
378
+ },
379
+ {
380
+ "id": "29",
381
+ "doc_id": [
382
+ "TinyBench/validation_Mechanical_Engineering_18.mp4"
383
+ ],
384
+ "file_type": "video tutorial",
385
+ "question": "What are the three cases of Insertion in 2-3 Tree?",
386
+ "question_type": "ocr + summary",
387
+ "evidence_type": "text",
388
+ "answer": "Insert in a node with 1 element. Insert in a node with two data elements whose parents contain only one element. Insert in a node with two data elements whose parents also contain two data elements.",
389
+ "content_domain": "short_tutorial_video",
390
+ "Comment": ""
391
+ },
392
+ {
393
+ "id": "30",
394
+ "doc_id": [
395
+ "TinyBench/new_Computer_Science_1.mp4"
396
+ ],
397
+ "file_type": "video tutorial",
398
+ "question": "What is the answer to the In Video Quiz in my Andrew Ng's lecture?",
399
+ "question_type": "ocr + fact",
400
+ "evidence_type": "text",
401
+ "answer": "a_2^{[3]} = g \\left( \\mathbf{w}_2^{[3]} \\cdot \\mathbf{a}^{[2]} + b_2^{[3]} \\right)",
402
+ "content_domain": "short_tutorial_video",
403
+ "Comment": "Formula"
404
+ },
405
+ {
406
+ "id": "31",
407
+ "doc_id": [
408
+ "TinyBench/videommmu_paper.pdf"
409
+ ],
410
+ "file_type": "paper pdf",
411
+ "question": "Which open-source model performs best on the Adaptation track and how does it compare to the worst-performing open-source model?",
412
+ "question_type": "reasoning",
413
+ "evidence_type": "table + Multi-row",
414
+ "answer": "LLaVA-Video-72B performs the best among open-source models on the Adaptation track with an accuracy of 43.33%. The worst-performing open-source model is InternVL2-8B with an accuracy of 31.67%. The difference in performance is 11.66 percentage points.",
415
+ "content_domain": "Academic paper",
416
+ "Comment": ""
417
+ },
418
+ {
419
+ "id": "32",
420
+ "doc_id": [
421
+ "TinyBench/videommmu_paper.pdf"
422
+ ],
423
+ "file_type": "paper pdf",
424
+ "question": "Which discipline shows the largest performance gap between Human Experts and Claude-3.5-Sonnet?",
425
+ "question_type": "reasoning",
426
+ "evidence_type": "table + Multi-row",
427
+ "answer": "The largest performance gap is in the Medicine discipline: Human Experts scored 70.54%, while Claude-3.5-Sonnet scored 58.14%, resulting in a 12.4 percentage point gap.",
428
+ "content_domain": "Academic paper",
429
+ "Comment": ""
430
+ },
431
+ {
432
+ "id": "33",
433
+ "doc_id": [
434
+ "TinyBench/videommmu_paper.pdf"
435
+ ],
436
+ "file_type": "paper pdf",
437
+ "question": "Which proprietary model achieves the highest Perception score, and how much higher is it than the top open-source model?",
438
+ "question_type": "reasoning",
439
+ "evidence_type": "table + Multi-row",
440
+ "answer": "Claude-3.5-Sonnet achieves the highest Perception score among proprietary models with 72.00%. The top open-source model in Perception is Aria with 65.67%. Claude outperforms Aria by 6.33 percentage points.",
441
+ "content_domain": "Academic paper",
442
+ "Comment": ""
443
+ },
444
+ {
445
+ "id": "34",
446
+ "doc_id": [
447
+ "TinyBench/videommmu_paper.pdf"
448
+ ],
449
+ "file_type": "paper pdf",
450
+ "question": "Which model demonstrates the highest Wrong-to-Right Rate and what is its corresponding Delta Knowledge value?",
451
+ "question_type": "reasoning",
452
+ "evidence_type": "table + Multi-row",
453
+ "answer": "Claude-3.5-Sonnet has a Wrong-to-Right Rate of 28.8% and a ∆knowledge value of 11.4%.",
454
+ "content_domain": "Academic paper",
455
+ "Comment": ""
456
+ },
457
+ {
458
+ "id": "35",
459
+ "doc_id": [
460
+ "TinyBench/videommmu_paper.pdf"
461
+ ],
462
+ "file_type": "paper pdf",
463
+ "question": "Which model shows a negative Delta Knowledge score and also has the highest Right-to-Wrong Rate?",
464
+ "question_type": "reasoning",
465
+ "evidence_type": "table + Multi-row",
466
+ "answer": "InternVL2-8B has the lowest ∆knowledge score of -8.5% and the highest Right-to-Wrong Rate at 55.0%.",
467
+ "content_domain": "Academic paper",
468
+ "Comment": ""
469
+ },
470
+ {
471
+ "id": "36",
472
+ "doc_id": [
473
+ "TinyBench/videommmu_paper.pdf"
474
+ ],
475
+ "file_type": "paper pdf",
476
+ "question": "Are all questions in the three tracks newly created by annotators?",
477
+ "question_type": "fact",
478
+ "evidence_type": "text + Multi-row",
479
+ "answer": "No. Perception and Comprehension questions are newly created by annotators, but Adaptation questions are sourced from MMMU and MMMU-Pro for STEM/business fields.",
480
+ "content_domain": "Academic paper",
481
+ "Comment": ""
482
+ },
483
+ {
484
+ "id": "37",
485
+ "doc_id": [
486
+ "TinyBench/videommmu_paper.pdf"
487
+ ],
488
+ "file_type": "paper pdf",
489
+ "question": "What are the formulas for Wrong-to-Right Rate and Right-to-Wrong Rate?",
490
+ "question_type": "fact",
491
+ "evidence_type": "formula + Multi-chunk",
492
+ "answer": "Wrong-to-Right Rate = (N_Wrong-to-Right / N_Wrong-before) × 100%. Right-to-Wrong Rate = (N_Right-to-Wrong / N_Right-before) × 100%. These are defined in two separate paragraphs when discussing model response changes after watching videos.",
493
+ "content_domain": "Academic paper",
494
+ "Comment": ""
495
+ },
496
+ {
497
+ "id": "38",
498
+ "doc_id": [
499
+ "TinyBench/videommmu_paper.pdf"
500
+ ],
501
+ "file_type": "paper pdf",
502
+ "question": "What are the three main types of errors observed in Claude-3.5-Sonnet on the Adaptation track and their respective percentages?",
503
+ "question_type": "fact",
504
+ "evidence_type": "text + Multi-chunk",
505
+ "answer": "The three main types of errors are: Method Adaptation Error (64%), Question Misreading (15%), and Method Selection Error (8%). This is summarized in Figure 7 and explained in detail in adjacent paragraphs.",
506
+ "content_domain": "Academic paper",
507
+ "Comment": ""
508
+ },
509
+ {
510
+ "id": "39",
511
+ "doc_id": [
512
+ "TinyBench/videommmu_paper.pdf"
513
+ ],
514
+ "file_type": "paper pdf",
515
+ "question": "How are Adaptation track questions evaluated and what inputs are provided to models?",
516
+ "question_type": "fact",
517
+ "evidence_type": "text + Multi-chunk",
518
+ "answer": "The input includes the full video and a final frame with the question's image appended at the end. A special prompt is added indicating this setup, as shown in Figure 8 and the paragraph explaining inputs in Section 4.1. Prompt: System Message: As an AI assistant, you should watch and learn from the video. Then, adapt what you learned to answer the following question. The image for this question is at the end of the video. Question: [Question Text] Options: A) [Option A] B) [Option B] [etc.]",
519
+ "content_domain": "Academic paper",
520
+ "Comment": ""
521
+ },
522
+ {
523
+ "id": "40",
524
+ "doc_id": [
525
+ "TinyBench/videommmu_paper.pdf"
526
+ ],
527
+ "file_type": "paper pdf",
528
+ "question": "What are the two video types in Video-MMMU and how do they differ?",
529
+ "question_type": "fact",
530
+ "evidence_type": "text + Multi-chunk",
531
+ "answer": "Video-MMMU includes two types of videos: Concept-Introduction and Problem-Solving. Concept videos focus on explaining theories and facts, while Problem-Solving videos demonstrate step-by-step solutions. This is explained in the text and visually illustrated in Figure 2.",
532
+ "content_domain": "Academic paper",
533
+ "Comment": ""
534
+ },
535
+ {
536
+ "id": "41",
537
+ "doc_id": [
538
+ "TinyBench/videommmu_paper.pdf"
539
+ ],
540
+ "file_type": "paper pdf",
541
+ "question": "What are the three cognitive tracks in Video-MMMU and how are they visually illustrated?",
542
+ "question_type": "fact",
543
+ "evidence_type": "text + Multi-page",
544
+ "answer": "The three tracks are Perception (extracting key information), Comprehension (understanding underlying concepts), and Adaptation (applying knowledge to new problems). They are introduced on page 1 and visually illustrated in Figure 1 on page 3, where each stage is associated with a different model behavior.",
545
+ "content_domain": "Academic paper",
546
+ "Comment": ""
547
+ },
548
+ {
549
+ "id": "42",
550
+ "doc_id": [
551
+ "TinyBench/videommmu_paper.pdf"
552
+ ],
553
+ "file_type": "paper pdf",
554
+ "question": "Which model shows the highest deltaknowledge score and what does this metric represent?",
555
+ "question_type": "reasoning",
556
+ "evidence_type": "table + Multi-page",
557
+ "answer": "GPT-4o shows the highest ∆knowledge score among models with 15.6%. The ∆knowledge metric represents the normalized performance improvement in the Adaptation track after watching a video, as defined on page 7.",
558
+ "content_domain": "Academic paper",
559
+ "Comment": ""
560
+ },
561
+ {
562
+ "id": "43",
563
+ "doc_id": [
564
+ "TinyBench/videommmu_paper.pdf"
565
+ ],
566
+ "file_type": "paper pdf",
567
+ "question": "What does a Method Adaptation Error look like in the Adaptation track, and how common is it, and what example is illustrated in which figure?",
568
+ "question_type": "reasoning",
569
+ "evidence_type": "text + Multi-page",
570
+ "answer": "A Method Adaptation Error occurs when a model recalls the correct method from the video but fails to apply it to a new scenario. Figure 6 shows an example with DFS intervals, while page 8 states that Method Adaptation accounts for 64% of Claude-3.5-Sonnet’s errors.",
571
+ "content_domain": "Academic paper",
572
+ "Comment": ""
573
+ },
574
+ {
575
+ "id": "44",
576
+ "doc_id": [
577
+ "TinyBench/videommmu_paper.pdf"
578
+ ],
579
+ "file_type": "paper pdf",
580
+ "question": "What is the difference between Concept Comprehension and Problem-solving Strategy Comprehension in the Comprehension track?",
581
+ "question_type": "reasoning",
582
+ "evidence_type": "text + Multi-chunk",
583
+ "answer": "Page 3 introduces the taxonomy where Concept Comprehension (CC) evaluates understanding of statements, often using multiple-answer formats, while Problem-solving Strategy Comprehension (PSC) changes inputs in example questions to test generalization.",
584
+ "content_domain": "Academic paper",
585
+ "Comment": ""
586
+ },
587
+ {
588
+ "id": "45",
589
+ "doc_id": [
590
+ "TinyBench/videommmu_paper.pdf"
591
+ ],
592
+ "file_type": "paper pdf",
593
+ "question": "Give me an example of a question that is a good fit for the Problem-solving Strategy Comprehension track.",
594
+ "question_type": "reasoning",
595
+ "evidence_type": "figure + Multi-row",
596
+ "answer": "An example question is: 'In the video, Example Question (1) is solved with an angle θ=25 degrees. If the angle θ is adjusted to 30 degrees while all other conditions remain unchanged, what will be the updated result for Example Question (1) as explained in the video?' This question appears in the Science domain and tests whether the model comprehends and can apply the same problem-solving strategy demonstrated in the video.",
597
+ "content_domain": "Academic paper",
598
+ "Comment": ""
599
+ },
600
+ {
601
+ "id": "46",
602
+ "doc_id": [
603
+ "TinyBench/videommmu_paper.pdf"
604
+ ],
605
+ "file_type": "paper pdf",
606
+ "question": "How many proprietary and open-source models are evaluated in Video-MMMU?",
607
+ "question_type": "fact",
608
+ "evidence_type": "text + Multi-row",
609
+ "answer": "Video-MMMU evaluates 4 proprietary LMMs (Gemini 1.5 Flash, Gemini 1.5 Pro, GPT-4o, Claude-3.5-Sonnet) and 11 open-source LMMs (VILA1.5-8B, LongVA-7B, Llama-3.2-11B, LLaVA-OneVision-7B, VILA1.5-40B, LLaVA-Video-7B, InternVL2-8B, MAmmoTH-VL-8B, LLaVA-OneVision-72B, LLaVA-Video-72B, Aria).",
610
+ "content_domain": "Academic paper",
611
+ "Comment": ""
612
+ },
613
+ {
614
+ "id": "47",
615
+ "doc_id": [
616
+ "TinyBench/0-Overview.pdf"
617
+ ],
618
+ "file_type": "ppt",
619
+ "question": "What is the deadline for the course project of SC4001?",
620
+ "question_type": "fact",
621
+ "evidence_type": "text",
622
+ "answer": "Nov 15",
623
+ "content_domain": "Course Material",
624
+ "Comment": ""
625
+ },
626
+ {
627
+ "id": "48",
628
+ "doc_id": [
629
+ "TinyBench/0-Overview.pdf"
630
+ ],
631
+ "file_type": "ppt",
632
+ "question": "What are the course hours of SC4001?",
633
+ "question_type": "fact",
634
+ "evidence_type": "text",
635
+ "answer": "Lectures: Friday from 12:30 pm to 2:20 pm (at LT2A). Tutorials: Wednesday from 5:30 pm to 6:20 pm (at LT2A). Tutorials start from the 3rd week (28 Aug). Note that the lecture in Week 6 (20 Sep) is at LT1.",
636
+ "content_domain": "Course Material",
637
+ "Comment": ""
638
+ },
639
+ {
640
+ "id": "49",
641
+ "doc_id": [
642
+ "TinyBench/0-Overview.pdf"
643
+ ],
644
+ "file_type": "ppt",
645
+ "question": "For the SC4001 course, what is the bonus for tutorial participation if my participation is 70-100%?",
646
+ "question_type": "fact",
647
+ "evidence_type": "table",
648
+ "answer": "5",
649
+ "content_domain": "Course Material",
650
+ "Comment": ""
651
+ },
652
+ {
653
+ "id": "50",
654
+ "doc_id": [
655
+ "TinyBench/0-Overview.pdf"
656
+ ],
657
+ "file_type": "ppt",
658
+ "question": "For the SC4001 course, what is the bonus for tutorial participation if my participation is 60%?",
659
+ "question_type": "reasoning",
660
+ "evidence_type": "table",
661
+ "answer": "2",
662
+ "content_domain": "Course Material",
663
+ "Comment": "60% is not directly given on the table"
664
+ },
665
+ {
666
+ "id": "51",
667
+ "doc_id": [
668
+ "TinyBench/0-Overview.pdf"
669
+ ],
670
+ "file_type": "ppt",
671
+ "question": "When does CNN & LeNet emerge?",
672
+ "question_type": "fact",
673
+ "evidence_type": "figure",
674
+ "answer": "1998",
675
+ "content_domain": "Course Material",
676
+ "Comment": ""
677
+ },
678
+ {
679
+ "id": "52",
680
+ "doc_id": [
681
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
682
+ ],
683
+ "file_type": "pdf report",
684
+ "question": "I am an undergraduate student. Which email should I contact for on-campus housing matters?",
685
+ "question_type": "fact",
686
+ "evidence_type": "table",
687
+ "answer": "has-ug@ntu.edu.sg",
688
+ "content_domain": "Guidebook",
689
+ "Comment": ""
690
+ },
691
+ {
692
+ "id": "53",
693
+ "doc_id": [
694
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
695
+ ],
696
+ "file_type": "pdf report",
697
+ "question": "What is the dental telephone number for Fullerton Healthcare at NTU, and what are the operating hours?",
698
+ "question_type": "fact",
699
+ "evidence_type": "table",
700
+ "answer": "(65) 6790 8331, Operating Hours: Monday to Friday 8.30 am to 9.00 pm (last registration at 8.30 pm). Saturday: 9.30 am to 12.00 noon. Sunday and Public Holidays: Closed",
701
+ "content_domain": "Guidebook",
702
+ "Comment": ""
703
+ },
704
+ {
705
+ "id": "54",
706
+ "doc_id": [
707
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
708
+ ],
709
+ "file_type": "pdf report",
710
+ "question": "How do I get to NTU by bus from Boon Lay station?",
711
+ "question_type": "fact",
712
+ "evidence_type": "text",
713
+ "answer": "Bus 179 and 199.",
714
+ "content_domain": "Guidebook",
715
+ "Comment": ""
716
+ },
717
+ {
718
+ "id": "55",
719
+ "doc_id": [
720
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
721
+ ],
722
+ "file_type": "pdf report",
723
+ "question": "I want to report medical emergencies after office hours. Who should I call and what is the number?",
724
+ "question_type": "fact",
725
+ "evidence_type": "table",
726
+ "answer": "NTU Campus Security, 6790 5200.",
727
+ "content_domain": "Guidebook",
728
+ "Comment": ""
729
+ },
730
+ {
731
+ "id": "56",
732
+ "doc_id": [
733
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
734
+ ],
735
+ "file_type": "pdf report",
736
+ "question": "When is the final week of teaching in Semester 2 of the 2016-2017 academic year?",
737
+ "question_type": "reasoning",
738
+ "evidence_type": "table",
739
+ "answer": "10-Apr-17 to 14-Apr-17.",
740
+ "content_domain": "Guidebook",
741
+ "Comment": ""
742
+ },
743
+ {
744
+ "id": "57",
745
+ "doc_id": [
746
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
747
+ ],
748
+ "file_type": "pdf report",
749
+ "question": "Today is March 30, 2017. How many days do I have left before the revision and examination period starts?",
750
+ "question_type": "reasoning",
751
+ "evidence_type": "table",
752
+ "answer": "18 days.",
753
+ "content_domain": "Guidebook",
754
+ "Comment": ""
755
+ },
756
+ {
757
+ "id": "58",
758
+ "doc_id": [
759
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
760
+ ],
761
+ "file_type": "pdf report",
762
+ "question": "I am in Semester 2, 2017. How many public holidays do I get, and when are they?",
763
+ "question_type": "reasoning",
764
+ "evidence_type": "multi-table + multi-page",
765
+ "answer": "4 public holidays:\n- Chinese New Year – January 28-29, 2017 (Saturday, Sunday)\n- Good Friday – April 14, 2017 (Friday)\n- Labour Day – May 1, 2017 (Monday)",
766
+ "content_domain": "Guidebook",
767
+ "Comment": ""
768
+ },
769
+ {
770
+ "id": "59",
771
+ "doc_id": [
772
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
773
+ ],
774
+ "file_type": "pdf report",
775
+ "question": "I am in Semester 2, 2017. How many public holidays do I have during the teaching weeks, and when are they?",
776
+ "question_type": "reasoning",
777
+ "evidence_type": "multi-table + multi-page",
778
+ "answer": "3 public holidays:\n- Chinese New Year – January 28-29, 2017 (Saturday, Sunday)\n- Good Friday – April 14, 2017 (Friday)",
779
+ "content_domain": "Guidebook",
780
+ "Comment": ""
781
+ },
782
+ {
783
+ "id": "60",
784
+ "doc_id": [
785
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
786
+ ],
787
+ "file_type": "pdf report",
788
+ "question": "Where can I find ATMs on the NTU campus? I need to withdraw some cash!",
789
+ "question_type": "fact",
790
+ "evidence_type": "table + Multi-row",
791
+ "answer": "You can find ATMs at the following locations on NTU campus:\nOCBC ATM\n- North Spine Plaza, Level 1 (near OCBC Bank)\n- South Spine, Level B3\n- Near Canteen 2\nPOSB ATM\n- North Spine Plaza, Level 2\n- South Spine, Level B3\n- Near Canteen 2\nState Bank of India ATM\n- North Spine Plaza, Level 2\nUOB ATM\n- North Spine Plaza, Level 2",
792
+ "content_domain": "Guidebook",
793
+ "Comment": "Multiple rows should be returned."
794
+ },
795
+ {
796
+ "id": "61",
797
+ "doc_id": [
798
+ "TinyBench/SAO-StudentSupport_Guidebook-Content.pdf"
799
+ ],
800
+ "file_type": "pdf report",
801
+ "question": "I think there's a Citibank ATM on campus. Can you confirm where it is?",
802
+ "question_type": "fact",
803
+ "evidence_type": "figure",
804
+ "answer": "There is a Citibank ATM on campus. You can find it at North Spine Plaza. It's located alongside OCBC and DBS ATMs, as seen in the image.",
805
+ "content_domain": "Guidebook",
806
+ "Comment": "OCR & Reasoning"
807
+ },
808
+ {
809
+ "id": "62",
810
+ "doc_id": [
811
+ "TinyBench/mmmu_website.pdf"
812
+ ],
813
+ "file_type": "web pdf",
814
+ "question": "What is the best model performance on the MMMU benchmark?",
815
+ "question_type": "reasoning",
816
+ "evidence_type": "table",
817
+ "answer": "o1, 78.2%",
818
+ "content_domain": "Webpage",
819
+ "Comment": ""
820
+ },
821
+ {
822
+ "id": "63",
823
+ "doc_id": [
824
+ "TinyBench/2401.18059v1.pdf"
825
+ ],
826
+ "file_type": "pdf paper",
827
+ "question": "In figure 4, which nodes are retrieved by RAPTOR for both questions?",
828
+ "question_type": "ocr + fact",
829
+ "evidence_type": "figure",
830
+ "answer": "[16, 19, 25]",
831
+ "content_domain": "Academic paper",
832
+ "Comment": ""
833
+ },
834
+ {
835
+ "id": "64",
836
+ "doc_id": [
837
+ "TinyBench/2401.18059v1.pdf"
838
+ ],
839
+ "file_type": "pdf paper",
840
+ "question": "In my paper about RAPTOR, what are the horizontal and vertical axis of Figure 3 respectively?",
841
+ "question_type": "fact",
842
+ "evidence_type": "figure",
843
+ "answer": "['Context Length', 'F1']",
844
+ "content_domain": "Academic paper",
845
+ "Comment": ""
846
+ },
847
+ {
848
+ "id": "65",
849
+ "doc_id": [
850
+ "TinyBench/2401.18059v1.pdf"
851
+ ],
852
+ "file_type": "pdf paper",
853
+ "question": "Based on \"PSEUDOCODE FOR RETRIEVAL METHODS\" in my RAPTOR paper, which algorithm has more number of lines? (Give the algorithm name)",
854
+ "question_type": "fact",
855
+ "evidence_type": "figure",
856
+ "answer": "Collapsed Tree Algorithm",
857
+ "content_domain": "Academic paper",
858
+ "Comment": ""
859
+ },
860
+ {
861
+ "id": "66",
862
+ "doc_id": [
863
+ "TinyBench/2401.18059v1.pdf"
864
+ ],
865
+ "file_type": "pdf paper",
866
+ "question": "I remember I have a paper about TREE-ORGANIZED RETRIEVAL. In Figure 1's demonstration, what are the color of the nodes that appear in more than one clusters?",
867
+ "question_type": "fact",
868
+ "evidence_type": "figure",
869
+ "answer": "['green', 'yellow']",
870
+ "content_domain": "Academic paper",
871
+ "Comment": ""
872
+ },
873
+ {
874
+ "id": "67",
875
+ "doc_id": [
876
+ "TinyBench/2401.18059v1.pdf"
877
+ ],
878
+ "file_type": "pdf paper",
879
+ "question": "In Figure 1's demonstration of RAPTOR, what are the color of the nodes that appear in all three clusters? Enumerate all of them them in a list (return an empty list if no such colors).",
880
+ "question_type": "fact",
881
+ "evidence_type": "figure",
882
+ "answer": "[]",
883
+ "content_domain": "Academic paper",
884
+ "Comment": ""
885
+ },
886
+ {
887
+ "id": "68",
888
+ "doc_id": [
889
+ "TinyBench/2401.18059v1.pdf"
890
+ ],
891
+ "file_type": "pdf paper",
892
+ "question": "What model is the clustering algorithm of the RAPTOR paper based on, and what presents a challenge to it?",
893
+ "question_type": "fact",
894
+ "evidence_type": "text",
895
+ "answer": "['Gaussian Mixture Models', 'the high dimensionality of vector embeddings']",
896
+ "content_domain": "Academic paper",
897
+ "Comment": ""
898
+ },
899
+ {
900
+ "id": "69",
901
+ "doc_id": [
902
+ "TinyBench/2401.18059v1.pdf"
903
+ ],
904
+ "file_type": "pdf paper",
905
+ "question": "Write down for me the pseudo code from appendix that corresponds to step 5 of the tree traversal method.",
906
+ "question_type": "fact",
907
+ "evidence_type": "formula",
908
+ "answer": "Slayer's sorted(top k)[:k].nodes",
909
+ "content_domain": "Academic paper",
910
+ "Comment": ""
911
+ },
912
+ {
913
+ "id": "70",
914
+ "doc_id": [
915
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
916
+ ],
917
+ "file_type": "pdf paper",
918
+ "question": "Wait, I want to know that among all valid respondents, how many rep/lean rep independents (the exact number, not the ratio) favor building a strong relationship with China on trade? Can you give your answer to the nearest hundred, such as 100, 200 or 300....",
919
+ "question_type": "reasoning",
920
+ "evidence_type": "figure",
921
+ "answer": "300",
922
+ "content_domain": "Research report / Introduction",
923
+ "Comment": ""
924
+ },
925
+ {
926
+ "id": "71",
927
+ "doc_id": [
928
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
929
+ ],
930
+ "file_type": "pdf report",
931
+ "question": "Among all valid respondents, how many hispanic independents (the exact number, not the ratio) favor building a strong relationship with China on trade? Please give your answer to the nearest hundred, such as 100, 200 or 300....",
932
+ "question_type": "hallucination",
933
+ "evidence_type": "figure",
934
+ "answer": "Not answerable",
935
+ "content_domain": "Research report / Introduction",
936
+ "Comment": ""
937
+ },
938
+ {
939
+ "id": "72",
940
+ "doc_id": [
941
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
942
+ ],
943
+ "file_type": "pdf report",
944
+ "question": "How many quotations from male respondent over 50 years old are included in this report PG_2021.03.04_US-Views-on-China_FINAL.pdf?",
945
+ "question_type": "reasoning + multi-page",
946
+ "evidence_type": "text",
947
+ "answer": "6",
948
+ "content_domain": "Research report / Introduction",
949
+ "Comment": ""
950
+ },
951
+ {
952
+ "id": "73",
953
+ "doc_id": [
954
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
955
+ ],
956
+ "file_type": "pdf report",
957
+ "question": "How many charts shown in the report about U.S.-China relationship have results stratified by respondents' gender?",
958
+ "question_type": "reasoning + multi-page",
959
+ "evidence_type": "figure",
960
+ "answer": "6",
961
+ "content_domain": "Research report / Introduction",
962
+ "Comment": ""
963
+ },
964
+ {
965
+ "id": "74",
966
+ "doc_id": [
967
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
968
+ ],
969
+ "file_type": "ppt",
970
+ "question": "Please list all countries whose power and influence respondents would like to see diminished in the results of my report on Questions Q41a-t.",
971
+ "question_type": "reasoning + multi-page",
972
+ "evidence_type": "text",
973
+ "answer": "['China', 'North Korea', 'Russia', 'Iran']",
974
+ "content_domain": "Research report / Introduction",
975
+ "Comment": ""
976
+ },
977
+ {
978
+ "id": "75",
979
+ "doc_id": [
980
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
981
+ ],
982
+ "file_type": "pdf report",
983
+ "question": "What's the absolute percent change of dem/lean dem respondents viewing China's growing military power a very serious threat from 2020 to 2021?",
984
+ "question_type": "reasoning",
985
+ "evidence_type": "figure",
986
+ "answer": "1%",
987
+ "content_domain": "Research report / Introduction",
988
+ "Comment": ""
989
+ },
990
+ {
991
+ "id": "76",
992
+ "doc_id": [
993
+ "TinyBench/PG_2021.03.04_US-Views-on-China_FINAL.pdf"
994
+ ],
995
+ "file_type": "pdf report",
996
+ "question": "What's the absolute percent change of respondents age above 65 viewing China's growing military power a very serious threat from 2020 to 2021?",
997
+ "question_type": "hallucination",
998
+ "evidence_type": "figure",
999
+ "answer": "Not answerable",
1000
+ "content_domain": "Research report / Introduction",
1001
+ "Comment": ""
1002
+ },
1003
+ {
1004
+ "id": "77",
1005
+ "doc_id": [
1006
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1007
+ ],
1008
+ "file_type": "pdf report",
1009
+ "question": "What's the percentage value of west Germany respondents viewing Germany's relationship with the United States is as important as its relationship with Russia?",
1010
+ "question_type": "fact",
1011
+ "evidence_type": "figure",
1012
+ "answer": "29%",
1013
+ "content_domain": "Research report / Introduction",
1014
+ "Comment": ""
1015
+ },
1016
+ {
1017
+ "id": "78",
1018
+ "doc_id": [
1019
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1020
+ ],
1021
+ "file_type": "pdf report",
1022
+ "question": "List all countries, except U.S. and German, that are mentioned in some illustrated surveys of my report that is about U.S. and Germany. EU is not a single country.",
1023
+ "question_type": "reasoning + multi-page",
1024
+ "evidence_type": "text",
1025
+ "answer": "['UK', 'Israel', 'China', 'Canada', 'Mexico', 'Japan', 'France', 'Austria', 'Russia']",
1026
+ "content_domain": "Research report / Introduction",
1027
+ "Comment": ""
1028
+ },
1029
+ {
1030
+ "id": "79",
1031
+ "doc_id": [
1032
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1033
+ ],
1034
+ "file_type": "pdf report",
1035
+ "question": "Among people who age 30-49, what is the difference of percentage value between Americen and Germen having positive view on their bilateral relationship?",
1036
+ "question_type": "reasoning",
1037
+ "evidence_type": "figure",
1038
+ "answer": "37%",
1039
+ "content_domain": "Research report / Introduction",
1040
+ "Comment": ""
1041
+ },
1042
+ {
1043
+ "id": "80",
1044
+ "doc_id": [
1045
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1046
+ ],
1047
+ "file_type": "pdf report",
1048
+ "question": "Enumerate all chapter names (for example, Sharp divides in German and American views of security issues, from use of force to defense budgeting) that discussed the relationship between Germany or the U.S. with NATO. Your answer should be a list.",
1049
+ "question_type": "reasoning + multi-page",
1050
+ "evidence_type": "text",
1051
+ "answer": "['Sharp divides in German and American views of security issues, from use of force to defense budgeting', 'Americans and Germans differ in their views of international organizations and leaders']",
1052
+ "content_domain": "Research report / Introduction",
1053
+ "Comment": ""
1054
+ },
1055
+ {
1056
+ "id": "81",
1057
+ "doc_id": [
1058
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1059
+ ],
1060
+ "file_type": "pdf report",
1061
+ "question": "List all titles of the charts (for example, Americans and Germans diverge sharply in their views of bilateral relations) in which the results are grouped by political affiliation.",
1062
+ "question_type": "reasoning + multi-page",
1063
+ "evidence_type": "figure",
1064
+ "answer": "['Republican support for increased defense spending from Europe has waned since 2017', 'Supporters of CDU/CSU more likely to favor increased defense spending', 'Democrats and Republicans are about as likely to name Germany as a top foreign policy partner, but Republicans are keener on Israel', 'Supporters of different parties take alternate stances on U.S.-German cooperation', 'Ideological differences in views of the UN, EU and Russia']",
1065
+ "content_domain": "Research report / Introduction",
1066
+ "Comment": ""
1067
+ },
1068
+ {
1069
+ "id": "82",
1070
+ "doc_id": [
1071
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1072
+ ],
1073
+ "file_type": "pdf report",
1074
+ "question": "What percentage of Germany respondent view China or Japan as the top economic power? Give me a percentage value.",
1075
+ "question_type": "reasoning",
1076
+ "evidence_type": "figure",
1077
+ "answer": "59%",
1078
+ "content_domain": "Research report / Introduction",
1079
+ "Comment": ""
1080
+ },
1081
+ {
1082
+ "id": "83",
1083
+ "doc_id": [
1084
+ "TinyBench/PG_2020.03.09_US-Germany_FINAL.pdf"
1085
+ ],
1086
+ "file_type": "pdf report",
1087
+ "question": "What percentage of Chinese respondent view the U.S.  or Japan as the top economic power? Give me a percentage value.",
1088
+ "question_type": "hallucination",
1089
+ "evidence_type": "figure",
1090
+ "answer": "Not answerable",
1091
+ "content_domain": "Research report / Introduction",
1092
+ "Comment": ""
1093
+ },
1094
+ {
1095
+ "id": "84",
1096
+ "doc_id": [
1097
+ "TinyBench/fd76bbefe469561966e5387aa709c482.pdf"
1098
+ ],
1099
+ "file_type": "pdf paper",
1100
+ "question": "What degree does LEBOUR have?",
1101
+ "question_type": "reasoning",
1102
+ "evidence_type": "text",
1103
+ "answer": "M.A.",
1104
+ "content_domain": "Academic paper",
1105
+ "Comment": ""
1106
+ },
1107
+ {
1108
+ "id": "85",
1109
+ "doc_id": [
1110
+ "TinyBench/fd76bbefe469561966e5387aa709c482.pdf"
1111
+ ],
1112
+ "file_type": "pdf paper",
1113
+ "question": "What is the title of the Figure 2 in my paper about Earth Shake?",
1114
+ "question_type": "fact",
1115
+ "evidence_type": "figure",
1116
+ "answer": "Diagram of Breccia Gashes with top denuded off but bottom shown in cliff",
1117
+ "content_domain": "Academic paper",
1118
+ "Comment": ""
1119
+ },
1120
+ {
1121
+ "id": "86",
1122
+ "doc_id": [
1123
+ "TinyBench/fd76bbefe469561966e5387aa709c482.pdf"
1124
+ ],
1125
+ "file_type": "pdf paper",
1126
+ "question": "Among figure 1-4 in my file fd76bbefe469561966e5387aa709c482.pdf, how many figures show more than one breccia gash?",
1127
+ "question_type": "reasoning + multi-page",
1128
+ "evidence_type": "figure",
1129
+ "answer": "2",
1130
+ "content_domain": "Academic paper",
1131
+ "Comment": ""
1132
+ },
1133
+ {
1134
+ "id": "87",
1135
+ "doc_id": [
1136
+ "TinyBench/fd76bbefe469561966e5387aa709c482.pdf"
1137
+ ],
1138
+ "file_type": "pdf paper",
1139
+ "question": "In my file fd76bbefe469561966e5387aa709c482.pdf, when was the last serious shock recorded in the table? I want your answer needs to be accurate to the year, month, day, hour, and minute.",
1140
+ "question_type": "fact",
1141
+ "evidence_type": "table",
1142
+ "answer": "1884 April 5, 10 45 a.m.",
1143
+ "content_domain": "Academic paper",
1144
+ "Comment": ""
1145
+ },
1146
+ {
1147
+ "id": "88",
1148
+ "doc_id": [
1149
+ "TinyBench/avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf"
1150
+ ],
1151
+ "file_type": "pdf paper",
1152
+ "question": "In my avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf, how many children are being read to in the photograph of Page 3?",
1153
+ "question_type": "fact",
1154
+ "evidence_type": "text",
1155
+ "answer": "2",
1156
+ "content_domain": "Tutorial/Workshop",
1157
+ "Comment": ""
1158
+ },
1159
+ {
1160
+ "id": "89",
1161
+ "doc_id": [
1162
+ "TinyBench/avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf"
1163
+ ],
1164
+ "file_type": "ppt",
1165
+ "question": "What is the name of the company that created my presentation about ASTERISKS?",
1166
+ "question_type": "fact",
1167
+ "evidence_type": "text",
1168
+ "answer": "AVALAUNCH MEDIA",
1169
+ "content_domain": "Tutorial/Workshop",
1170
+ "Comment": ""
1171
+ },
1172
+ {
1173
+ "id": "90",
1174
+ "doc_id": [
1175
+ "TinyBench/avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf"
1176
+ ],
1177
+ "file_type": "ppt",
1178
+ "question": "According to the presentation about ASTERISKS, what are three bad fonts?",
1179
+ "question_type": "fact",
1180
+ "evidence_type": "text",
1181
+ "answer": "['Times New Roman', 'Arial', 'Comic Sans']",
1182
+ "content_domain": "Tutorial/Workshop",
1183
+ "Comment": ""
1184
+ },
1185
+ {
1186
+ "id": "91",
1187
+ "doc_id": [
1188
+ "TinyBench/avalaunchpresentationsthatkickasteriskv3copy-150318114804-conversion-gate01_95.pdf"
1189
+ ],
1190
+ "file_type": "ppt",
1191
+ "question": "How many reasons your presentation needs for asterisk kicked?",
1192
+ "question_type": "fact + reasoning",
1193
+ "evidence_type": "text",
1194
+ "answer": "4",
1195
+ "content_domain": "Tutorial/Workshop",
1196
+ "Comment": ""
1197
+ },
1198
+ {
1199
+ "id": "92",
1200
+ "doc_id": [
1201
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1202
+ ],
1203
+ "file_type": "ppt",
1204
+ "question": "How many districts were sampled during Wave III?",
1205
+ "question_type": "fact",
1206
+ "evidence_type": "table",
1207
+ "answer": "44",
1208
+ "content_domain": "Research report / Introduction",
1209
+ "Comment": ""
1210
+ },
1211
+ {
1212
+ "id": "93",
1213
+ "doc_id": [
1214
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1215
+ ],
1216
+ "file_type": "ppt",
1217
+ "question": "How many districts were sampled during Wave IV?",
1218
+ "question_type": "fact",
1219
+ "evidence_type": "table",
1220
+ "answer": "Not answerable",
1221
+ "content_domain": "Research report / Introduction",
1222
+ "Comment": ""
1223
+ },
1224
+ {
1225
+ "id": "94",
1226
+ "doc_id": [
1227
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1228
+ ],
1229
+ "file_type": "ppt",
1230
+ "question": "How many fieldwork personnel in all for Wave I and Wave II?",
1231
+ "question_type": "fact + reasoning",
1232
+ "evidence_type": "table",
1233
+ "answer": "102",
1234
+ "content_domain": "Research report / Introduction",
1235
+ "Comment": ""
1236
+ },
1237
+ {
1238
+ "id": "95",
1239
+ "doc_id": [
1240
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1241
+ ],
1242
+ "file_type": "ppt",
1243
+ "question": "According to my finalmediafindingspdf-141228031149-conversion-gate02_95.pdf, what is the color of Kailali in my map of Page 12?",
1244
+ "question_type": "fact",
1245
+ "evidence_type": "image",
1246
+ "answer": "yellow",
1247
+ "content_domain": "Research report / Introduction",
1248
+ "Comment": ""
1249
+ },
1250
+ {
1251
+ "id": "96",
1252
+ "doc_id": [
1253
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1254
+ ],
1255
+ "file_type": "ppt",
1256
+ "question": "In my finalmediafindingspdf-141228031149-conversion-gate02_95.pdf, what is the color of Beijing in my map of Page 12?",
1257
+ "question_type": "hallucination",
1258
+ "evidence_type": "image",
1259
+ "answer": "Not answerable",
1260
+ "content_domain": "Research report / Introduction",
1261
+ "Comment": ""
1262
+ },
1263
+ {
1264
+ "id": "97",
1265
+ "doc_id": [
1266
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1267
+ ],
1268
+ "file_type": "ppt",
1269
+ "question": "Among 4021 respondents, what is the percentage of them having a smart phone?",
1270
+ "question_type": "fact",
1271
+ "evidence_type": "figure",
1272
+ "answer": "27.20%",
1273
+ "content_domain": "Research report / Introduction",
1274
+ "Comment": ""
1275
+ },
1276
+ {
1277
+ "id": "98",
1278
+ "doc_id": [
1279
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1280
+ ],
1281
+ "file_type": "ppt",
1282
+ "question": "According to finalmediafindingspdf-141228031149-conversion-gate02_95.pdf, how many percentage respondents in this survey access to internet few times a week or more?",
1283
+ "question_type": "reasoning",
1284
+ "evidence_type": "figure",
1285
+ "answer": "14%",
1286
+ "content_domain": "Research report / Introduction",
1287
+ "Comment": ""
1288
+ },
1289
+ {
1290
+ "id": "99",
1291
+ "doc_id": [
1292
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1293
+ ],
1294
+ "file_type": "ppt",
1295
+ "question": "According to finalmediafindingspdf-141228031149-conversion-gate02_95.pdf, how many percentage respondents in this survey access to internet more than two times per month?",
1296
+ "question_type": "hallucination",
1297
+ "evidence_type": "figure",
1298
+ "answer": "Not answerable",
1299
+ "content_domain": "Research report / Introduction",
1300
+ "Comment": ""
1301
+ },
1302
+ {
1303
+ "id": "100",
1304
+ "doc_id": [
1305
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1306
+ ],
1307
+ "file_type": "ppt",
1308
+ "question": "Enumerate the media sources surveyed in this report about Nepal Media Landscape 2014.",
1309
+ "question_type": "fact + multi-page",
1310
+ "evidence_type": "figure",
1311
+ "answer": "['Radio', 'Newspaper', 'Television', 'Internet']",
1312
+ "content_domain": "Research report / Introduction",
1313
+ "Comment": ""
1314
+ },
1315
+ {
1316
+ "id": "101",
1317
+ "doc_id": [
1318
+ "TinyBench/finalmediafindingspdf-141228031149-conversion-gate02_95.pdf"
1319
+ ],
1320
+ "file_type": "ppt",
1321
+ "question": "How many female respondents in wave III never listen to the radio in recent half year?",
1322
+ "question_type": "reasoning + multi-page",
1323
+ "evidence_type": "table + figure",
1324
+ "answer": "1115",
1325
+ "content_domain": "Research report / Introduction",
1326
+ "Comment": ""
1327
+ },
1328
+ {
1329
+ "id": "102",
1330
+ "doc_id": [
1331
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1332
+ ],
1333
+ "file_type": "ppt",
1334
+ "question": "In 2014, respondents who believe traditional values are outdated increased by how many percentage points compared to 2011?",
1335
+ "question_type": "reasoning",
1336
+ "evidence_type": "figure",
1337
+ "answer": "29%",
1338
+ "content_domain": "Research report / Introduction",
1339
+ "Comment": ""
1340
+ },
1341
+ {
1342
+ "id": "103",
1343
+ "doc_id": [
1344
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1345
+ ],
1346
+ "file_type": "ppt",
1347
+ "question": "In 2014, respondents who believe traditional values are outdated increased by how many percentage points compared to 2001?",
1348
+ "question_type": "hallucination",
1349
+ "evidence_type": "figure",
1350
+ "answer": "Not answerable",
1351
+ "content_domain": "Research report / Introduction",
1352
+ "Comment": ""
1353
+ },
1354
+ {
1355
+ "id": "104",
1356
+ "doc_id": [
1357
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1358
+ ],
1359
+ "file_type": "ppt",
1360
+ "question": "In the year in which Palestine was added to the survey, respondents who believe traditional values are outdated increased by how many percentage points compared to 2011, according to asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf?",
1361
+ "question_type": "reasoning + multi-page",
1362
+ "evidence_type": "figure",
1363
+ "answer": "29%",
1364
+ "content_domain": "Research report / Introduction",
1365
+ "Comment": ""
1366
+ },
1367
+ {
1368
+ "id": "105",
1369
+ "doc_id": [
1370
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1371
+ ],
1372
+ "file_type": "ppt",
1373
+ "question": "Which country had the highest percentage of respondents for whom traditional values mean a lot?",
1374
+ "question_type": "fact",
1375
+ "evidence_type": "figure",
1376
+ "answer": "Oman",
1377
+ "content_domain": "Research report / Introduction",
1378
+ "Comment": ""
1379
+ },
1380
+ {
1381
+ "id": "106",
1382
+ "doc_id": [
1383
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1384
+ ],
1385
+ "file_type": "ppt",
1386
+ "question": "What is the 8th (out of top10) findings listed in my presentation named asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf?",
1387
+ "question_type": "fact",
1388
+ "evidence_type": "text",
1389
+ "answer": "Arab youth are increasingly concerned about obesity and lifestyle diseases and do not believe that healthcare in their country is improving",
1390
+ "content_domain": "Research report / Introduction",
1391
+ "Comment": ""
1392
+ },
1393
+ {
1394
+ "id": "107",
1395
+ "doc_id": [
1396
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1397
+ ],
1398
+ "file_type": "ppt",
1399
+ "question": "What is the 11th findings listed in my presentation named asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf?",
1400
+ "question_type": "hallucination",
1401
+ "evidence_type": "text",
1402
+ "answer": "Not answerable",
1403
+ "content_domain": "Research report / Introduction",
1404
+ "Comment": ""
1405
+ },
1406
+ {
1407
+ "id": "108",
1408
+ "doc_id": [
1409
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1410
+ ],
1411
+ "file_type": "ppt",
1412
+ "question": "Among the top 10 findings, how many of them are supported by statistical results grouped by the countries in my presentation named asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf?",
1413
+ "question_type": "reasoning + multi-page",
1414
+ "evidence_type": "figure",
1415
+ "answer": "4",
1416
+ "content_domain": "Research report / Introduction",
1417
+ "Comment": ""
1418
+ },
1419
+ {
1420
+ "id": "109",
1421
+ "doc_id": [
1422
+ "TinyBench/asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf"
1423
+ ],
1424
+ "file_type": "ppt",
1425
+ "question": "According to asdaaburson-marstellerarabyouthsurvey2014-140407100615-phpapp01_95.pdf, which country's youth show the greatest concern about unemployment problem?",
1426
+ "question_type": "fact",
1427
+ "evidence_type": "figure",
1428
+ "answer": "Egypt",
1429
+ "content_domain": "Research report / Introduction",
1430
+ "Comment": ""
1431
+ },
1432
+ {
1433
+ "id": "110",
1434
+ "doc_id": [
1435
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1436
+ ],
1437
+ "file_type": "pdf report",
1438
+ "question": "What is the percentage of people who do not go online in my file PIP_Seniors-and-Tech-Use_040314.pdf?",
1439
+ "question_type": "fact",
1440
+ "evidence_type": "text",
1441
+ "answer": "41",
1442
+ "content_domain": "Research report / Introduction",
1443
+ "Comment": ""
1444
+ },
1445
+ {
1446
+ "id": "111",
1447
+ "doc_id": [
1448
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1449
+ ],
1450
+ "file_type": "pdf report",
1451
+ "question": "How many people who do not go online or only use SNS in the Older Adults and Technology by Princeton Survey Research Associates International from July 18 to September 30, 2013?",
1452
+ "question_type": "fact",
1453
+ "evidence_type": "text",
1454
+ "answer": "4087",
1455
+ "content_domain": "Research report / Introduction",
1456
+ "Comment": ""
1457
+ },
1458
+ {
1459
+ "id": "112",
1460
+ "doc_id": [
1461
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1462
+ ],
1463
+ "file_type": "pdf report",
1464
+ "question": "How many people who do not go online or only use SNS in the Older Adults and Technology by Princeton Survey Research Associates International from July 18 to September 30, 2022 ?",
1465
+ "question_type": "hallucination",
1466
+ "evidence_type": "text",
1467
+ "answer": "Not answerable",
1468
+ "content_domain": "Research report / Introduction",
1469
+ "Comment": ""
1470
+ },
1471
+ {
1472
+ "id": "113",
1473
+ "doc_id": [
1474
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1475
+ ],
1476
+ "file_type": "pdf report",
1477
+ "question": "What is the percentage gap between the male 65+ age group who use the internet and broadband at home in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey?",
1478
+ "question_type": "reasoning + multi-page",
1479
+ "evidence_type": "table + figure",
1480
+ "answer": "12%",
1481
+ "content_domain": "Research report / Introduction",
1482
+ "Comment": ""
1483
+ },
1484
+ {
1485
+ "id": "114",
1486
+ "doc_id": [
1487
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1488
+ ],
1489
+ "file_type": "pdf report",
1490
+ "question": "What is the gap between the percentage of 65+ age group who go online in 2000 and the percentage of 80+ age group still go online on 2022?",
1491
+ "question_type": "hallucination",
1492
+ "evidence_type": "table + figure",
1493
+ "answer": "Not answerable",
1494
+ "content_domain": "Research report / Introduction",
1495
+ "Comment": ""
1496
+ },
1497
+ {
1498
+ "id": "115",
1499
+ "doc_id": [
1500
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1501
+ ],
1502
+ "file_type": "pdf report",
1503
+ "question": "Among the Higher-income seniors, what are the percentages of them who go online, have a smartphone, and own a tablet computer? Please write the answer in list format and in descending order, e.g., [\"9%\",\"8%\"] in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey?",
1504
+ "question_type": "fact + multi-page",
1505
+ "evidence_type": "table",
1506
+ "answer": "['90%', '42%', '39%']",
1507
+ "content_domain": "Research report / Introduction",
1508
+ "Comment": ""
1509
+ },
1510
+ {
1511
+ "id": "116",
1512
+ "doc_id": [
1513
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1514
+ ],
1515
+ "file_type": "pdf report",
1516
+ "question": "Among the Higher-income seniors, what are the percentages of them who go online, have a smartphone, and own a tablet computer? Please write the answer in list format and in descending order, e.g., [\"9%\",\"8%\"] in the Pew Research Center Internet Project July 18-September 30, 2022 tracking survey?",
1517
+ "question_type": "hallucination",
1518
+ "evidence_type": "table",
1519
+ "answer": "Not answerable",
1520
+ "content_domain": "Research report / Introduction",
1521
+ "Comment": ""
1522
+ },
1523
+ {
1524
+ "id": "117",
1525
+ "doc_id": [
1526
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1527
+ ],
1528
+ "file_type": "pdf report",
1529
+ "question": "How many 65+ age group people go online 3-5 times per week or Every day in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey?",
1530
+ "question_type": "fact",
1531
+ "evidence_type": "text",
1532
+ "answer": "1251",
1533
+ "content_domain": "Research report / Introduction",
1534
+ "Comment": ""
1535
+ },
1536
+ {
1537
+ "id": "118",
1538
+ "doc_id": [
1539
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1540
+ ],
1541
+ "file_type": "pdf report",
1542
+ "question": "How many 65+ age group people go online 3-5 times per week or Every day in the Pew Research Center Internet Project July 18-September 30, 2020 tracking survey?",
1543
+ "question_type": "hallucination",
1544
+ "evidence_type": "table + figure",
1545
+ "answer": "Not answerable",
1546
+ "content_domain": "Research report / Introduction",
1547
+ "Comment": ""
1548
+ },
1549
+ {
1550
+ "id": "119",
1551
+ "doc_id": [
1552
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1553
+ ],
1554
+ "file_type": "pdf report",
1555
+ "question": "What is the gap of 65+ people with College graduate contain a cell phone and a tablet computer in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey?",
1556
+ "question_type": "fact + multi-page",
1557
+ "evidence_type": "table",
1558
+ "answer": "301",
1559
+ "content_domain": "Research report / Introduction",
1560
+ "Comment": ""
1561
+ },
1562
+ {
1563
+ "id": "120",
1564
+ "doc_id": [
1565
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1566
+ ],
1567
+ "file_type": "pdf report",
1568
+ "question": "What is the gap of infants with College graduate contain a cell phone and a tablet computer in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey?",
1569
+ "question_type": "hallucination",
1570
+ "evidence_type": "table",
1571
+ "answer": "Not answerable",
1572
+ "content_domain": "Research report / Introduction",
1573
+ "Comment": ""
1574
+ },
1575
+ {
1576
+ "id": "121",
1577
+ "doc_id": [
1578
+ "TinyBench/PIP_Seniors-and-Tech-Use_040314.pdf"
1579
+ ],
1580
+ "file_type": "pdf report",
1581
+ "question": "What is the gap between the male 65+ age group who use the internet and broadband at home in the Pew Research Center Internet Project July 18-September 30, 2013 tracking survey? Please write the answer in float format.",
1582
+ "question_type": "fact + multi-page",
1583
+ "evidence_type": "table",
1584
+ "answer": "73",
1585
+ "content_domain": "Research report / Introduction",
1586
+ "Comment": ""
1587
+ },
1588
+ {
1589
+ "id": "122",
1590
+ "doc_id": [
1591
+ "TinyBench/MMMU_website.pdf"
1592
+ ],
1593
+ "file_type": "web pdf",
1594
+ "question": "What is TeleMM's score on the MMMU(val) on the leaderboard?",
1595
+ "question_type": "fact",
1596
+ "evidence_type": "table",
1597
+ "answer": "61.40%",
1598
+ "content_domain": "Webpage",
1599
+ "Comment": ""
1600
+ },
1601
+ {
1602
+ "id": "123",
1603
+ "doc_id": [
1604
+ "TinyBench/MMMU_website.pdf"
1605
+ ],
1606
+ "file_type": "web pdf",
1607
+ "question": "What is Gemini 2.0 Flash's score?",
1608
+ "question_type": "fact",
1609
+ "evidence_type": "table",
1610
+ "answer": "71.70%",
1611
+ "content_domain": "Webpage",
1612
+ "Comment": ""
1613
+ },
1614
+ {
1615
+ "id": "124",
1616
+ "doc_id": [
1617
+ "TinyBench/MMMU_website.pdf"
1618
+ ],
1619
+ "file_type": "web pdf",
1620
+ "question": "How many questions are there in the Chemistry subject of the MMMU benchmark? And what is the percentage of the Chemistry questions?",
1621
+ "question_type": "fact",
1622
+ "evidence_type": "figure",
1623
+ "answer": "638, 5.5%",
1624
+ "content_domain": "Webpage",
1625
+ "Comment": ""
1626
+ },
1627
+ {
1628
+ "id": "125",
1629
+ "doc_id": [
1630
+ "TinyBench/MMMU_website.pdf"
1631
+ ],
1632
+ "file_type": "web pdf",
1633
+ "question": "What percentage of questions are in the Tech&Engineering discipline in the MMMU benchmark?",
1634
+ "question_type": "fact",
1635
+ "evidence_type": "figure",
1636
+ "answer": "26%",
1637
+ "content_domain": "Webpage",
1638
+ "Comment": ""
1639
+ },
1640
+ {
1641
+ "id": "126",
1642
+ "doc_id": [
1643
+ "TinyBench/MMMU_website.pdf"
1644
+ ],
1645
+ "file_type": "web pdf",
1646
+ "question": "What is the SOTA model performance on MMMU_val according to my memory?",
1647
+ "question_type": "reasoning + multi-page",
1648
+ "evidence_type": "table",
1649
+ "answer": "O1. 78.2%",
1650
+ "content_domain": "",
1651
+ "Comment": ""
1652
+ },
1653
+ {
1654
+ "id": "127",
1655
+ "doc_id": [
1656
+ "TinyBench/MMMU_website.pdf"
1657
+ ],
1658
+ "file_type": "web pdf",
1659
+ "question": "What is the SOTA performance on MMMU_val according to my memory?",
1660
+ "question_type": "reasoning + multi-page",
1661
+ "evidence_type": "table",
1662
+ "answer": "Human Expert (High). 88.6%.",
1663
+ "content_domain": "",
1664
+ "Comment": ""
1665
+ },
1666
+ {
1667
+ "id": "128",
1668
+ "doc_id": [
1669
+ "TinyBench/MMMU_website.pdf"
1670
+ ],
1671
+ "file_type": "web pdf",
1672
+ "question": "What is the SOTA model on MMMU_val according to my memory?",
1673
+ "question_type": "reasoning + multi-page",
1674
+ "evidence_type": "table",
1675
+ "answer": "O1.",
1676
+ "content_domain": "",
1677
+ "Comment": ""
1678
+ },
1679
+ {
1680
+ "id": "129",
1681
+ "doc_id": [
1682
+ "TinyBench/llava_onevision.pdf"
1683
+ ],
1684
+ "file_type": "pdf paper",
1685
+ "question": "In my LLaVA-OneVision paper, what is GPT4o's performance on AI2D?",
1686
+ "question_type": "fact",
1687
+ "evidence_type": "table",
1688
+ "answer": "94.20%",
1689
+ "content_domain": "Academic paper",
1690
+ "Comment": ""
1691
+ },
1692
+ {
1693
+ "id": "130",
1694
+ "doc_id": [
1695
+ "TinyBench/llava_onevision.pdf"
1696
+ ],
1697
+ "file_type": "pdf paper",
1698
+ "question": "What is GPT4o's performance on AI2D?",
1699
+ "question_type": "fact",
1700
+ "evidence_type": "table",
1701
+ "answer": "94.20%",
1702
+ "content_domain": "Academic paper",
1703
+ "Comment": ""
1704
+ },
1705
+ {
1706
+ "id": "131",
1707
+ "doc_id": [
1708
+ "TinyBench/llava_onevision.pdf"
1709
+ ],
1710
+ "file_type": "pdf paper",
1711
+ "question": "What is LLaVA OneVision-72B's score on Video-MME?",
1712
+ "question_type": "fact",
1713
+ "evidence_type": "table",
1714
+ "answer": "66.20%",
1715
+ "content_domain": "Academic paper",
1716
+ "Comment": ""
1717
+ },
1718
+ {
1719
+ "id": "132",
1720
+ "doc_id": [
1721
+ "TinyBench/llava_onevision.pdf"
1722
+ ],
1723
+ "file_type": "pdf paper",
1724
+ "question": "I remember I have a LLaVA-OneVision paper, I want to check if LLaVA-OneVision-72B outperforms GPT-4o on the MLVU benchmark? If so, by how much?",
1725
+ "question_type": "reasoning",
1726
+ "evidence_type": "table",
1727
+ "answer": "Yes, LLaVA-OneVision-72B outperforms GPT-4o on the MLVU benchmark. 68.0% - 64.6% = 3.4%",
1728
+ "content_domain": "Academic paper",
1729
+ "Comment": ""
1730
+ },
1731
+ {
1732
+ "id": "133",
1733
+ "doc_id": [
1734
+ "TinyBench/llava_onevision.pdf"
1735
+ ],
1736
+ "file_type": "pdf paper",
1737
+ "question": "I do not understand Figure 1 in my LLaVA-OneVision paper. Can you explain it to me?",
1738
+ "question_type": "summary",
1739
+ "evidence_type": "figure",
1740
+ "answer": "- **Model Overview**: The figure illustrates the LLaVA-OneVision network architecture, which extends the LLaVA framework to support multiple visual signals, including single images, multiple images, and videos. - **Components**: The architecture consists of a vision encoder (SigLIP), a projection layer (2-layer MLP), and a language model (Qwen-2). - **Process Flow**: Visual inputs are processed by the vision encoder, projected into a latent space, and then fed into the language model alongside textual instructions to generate responses. - **Expanded Capabilities**: Unlike standard LLaVA, this version integrates multi-image and video inputs, enabling broader multimodal understanding.",
1741
+ "content_domain": "Academic paper",
1742
+ "Comment": ""
1743
+ },
1744
+ {
1745
+ "id": "134",
1746
+ "doc_id": [
1747
+ "TinyBench/llava_onevision.pdf", "TinyBench/videommmu_paper.pdf"
1748
+ ],
1749
+ "file_type": "pdf paper",
1750
+ "question": "How many papers are co-authored by Ziwei Liu and Bo Li?",
1751
+ "question_type": "reasoning + multi-doc",
1752
+ "evidence_type": "title/section_header",
1753
+ "answer": "At least 11 papers.",
1754
+ "content_domain": "Academic paper",
1755
+ "Comment": ""
1756
+ },
1757
+ {
1758
+ "id": "135",
1759
+ "doc_id": [
1760
+ "NA"
1761
+ ],
1762
+ "file_type": "pdf paper",
1763
+ "question": "How many papers are co-authored by Xiang Yue and Jingkang Yang?",
1764
+ "question_type": "reasoning + multi-doc",
1765
+ "evidence_type": "title/section_header",
1766
+ "answer": "0 paper. They are not co-authors.",
1767
+ "content_domain": "Academic paper",
1768
+ "Comment": ""
1769
+ }
1770
+ ]
Fullset/qa_v00.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "doc_id": [
5
+ "TinyBench/videommmu_paper.pdf"
6
+ ],
7
+ "file_type": "paper pdf",
8
+ "question": "What are novelties of Video-MMMU dataset",
9
+ "question_type": "summary",
10
+ "evidence_type": "text",
11
+ "answer": "1) Knowledge-Intensive Video Collection: The dataset includes 300 expert-level videos across 6 professional disciplines, covering 30 subjects. 2) Knowledge Acquisition-Based QA Design: Each video contains three QA pairs corresponding to the stages of knowledge acquisition—Perception (extracting key information), Comprehension (grasping concepts), and Adaptation (applying knowledge to new contexts). 3) Quantitative Knowledge Assessment: they introduce a delta knowledge metric to measure performance gains on practice exam questions after watching the videos, enabling quantitative evaluation of LMMs' ability to learn and apply new knowledge.",
12
+ "content_domain": "Academic paper",
13
+ "Comment": ""
14
+ },
15
+ {
16
+ "id": "1",
17
+ "doc_id": [
18
+ "TinyBench/videommmu_paper.pdf"
19
+ ],
20
+ "file_type": "paper pdf",
21
+ "question": "How are QA pairs categorized in Video-MMMU?",
22
+ "question_type": "summary",
23
+ "evidence_type": "text",
24
+ "answer": "Perception Questions assess the ability to extract information from videos via: 1) Optical Character Recognition (OCR) and 2) Automatic Speech Recognition (ASR). Comprehension Questions evaluate understanding through: 1) Concept Comprehension (CC) and 2) Problem-Solving Strategy Comprehension (PSC). Adaptation Questions test the ability to apply knowledge to new scenarios via: 1) Case Study Analysis (CSA) and 2) Problem-Solving Strategy Adaptation (PSA).",
25
+ "content_domain": "Academic paper",
26
+ "Comment": ""
27
+ }
28
+ ]
Fullset/qa_v1.json ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "id": "0",
4
+ "doc_id": [
5
+ "TinyBench/videommmu_paper.pdf"
6
+ ],
7
+ "file_type": "paper pdf",
8
+ "question": "What are 3 novelties of Video-MMMU dataset",
9
+ "question_type": "summary",
10
+ "evidence_type": "text",
11
+ "answer": "1) Knowledge-Intensive Video Collection: The dataset includes 300 expert-level videos across 6 professional disciplines, covering 30 subjects. 2) Knowledge Acquisition-Based QA Design: Each video contains three QA pairs corresponding to the stages of knowledge acquisition—Perception (extracting key information), Comprehension (grasping concepts), and Adaptation (applying knowledge to new contexts). 3) Quantitative Knowledge Assessment: they introduce a delta knowledge metric to measure performance gains on practice exam questions after watching the videos, enabling quantitative evaluation of LMMs' ability to learn and apply new knowledge.",
12
+ "content_domain": "Academic paper",
13
+ "Comment": ""
14
+ },
15
+ {
16
+ "id": "1",
17
+ "doc_id": [
18
+ "TinyBench/videommmu_paper.pdf"
19
+ ],
20
+ "file_type": "paper pdf",
21
+ "question": "How are QA pairs categorized within each track in Video-MMMU?",
22
+ "question_type": "summary",
23
+ "evidence_type": "text",
24
+ "answer": "Perception Questions assess the ability to extract information from videos via: 1) Optical Character Recognition (OCR) and 2) Automatic Speech Recognition (ASR). Comprehension Questions evaluate understanding through: 1) Concept Comprehension (CC) and 2) Problem-Solving Strategy Comprehension (PSC). Adaptation Questions test the ability to apply knowledge to new scenarios via: 1) Case Study Analysis (CSA) and 2) Problem-Solving Strategy Adaptation (PSA).",
25
+ "content_domain": "Academic paper",
26
+ "Comment": ""
27
+ },
28
+ {
29
+ "id": "2",
30
+ "doc_id": [
31
+ "TinyBench/videommmu_paper.pdf"
32
+ ],
33
+ "file_type": "paper pdf",
34
+ "question": "Are the QAs in Video-MMMU all newly annotated by human annotators with no other sources of data?",
35
+ "question_type": "fact",
36
+ "evidence_type": "text",
37
+ "answer": "No. Perception and Comprehension questions are manually created. For Adaptation, questions in Science, Engineering, Medicine, and Business are sourced from MMMU/MMMU-Pro, while Art and Humanities remain manual.",
38
+ "content_domain": "Academic paper",
39
+ "Comment": ""
40
+ },
41
+ {
42
+ "id": "3",
43
+ "doc_id": [
44
+ "TinyBench/videommmu_paper.pdf"
45
+ ],
46
+ "file_type": "paper pdf",
47
+ "question": "Does Video-MMMU has the longest video length among all benchmarks recorded in the paper?",
48
+ "question_type": "reasoning",
49
+ "evidence_type": "table",
50
+ "answer": "No. 506.2s. The benchmark with longest video length is Video-MME, which is 1017.9s.",
51
+ "content_domain": "Academic paper",
52
+ "Comment": ""
53
+ },
54
+ {
55
+ "id": "4",
56
+ "doc_id": [
57
+ "TinyBench/videommmu_paper.pdf"
58
+ ],
59
+ "file_type": "paper pdf",
60
+ "question": "Does Claude-3.5-Sonnet achieves the highest delta score on Video-MMMU? If so, what is the delta score? If not, tell me the score and also what is the delta score of the highest delta score model?",
61
+ "question_type": "reasoning",
62
+ "evidence_type": "table",
63
+ "answer": "No. 11.4. smaller than GPT-4o's 15.6%.",
64
+ "content_domain": "Academic paper",
65
+ "Comment": ""
66
+ },
67
+ {
68
+ "id": "6",
69
+ "doc_id": [
70
+ "TinyBench/videommmu_paper.pdf"
71
+ ],
72
+ "file_type": "paper pdf",
73
+ "question": "How is delta knowledge calculated? What is the formula?",
74
+ "question_type": "fact",
75
+ "evidence_type": "formula",
76
+ "answer": "Δ_knowledge = \\frac{Acc_{post} - Acc_{pre}}{100\\% - Acc_{pre}} \\times 100\\% \\quad \\text{where } Acc_{pre} \\text{ and } Acc_{post} \\text{ represent the accuracy before and after watching the video, respectively.}",
77
+ "content_domain": "Academic paper",
78
+ "Comment": ""
79
+ },
80
+ {
81
+ "id": "7",
82
+ "doc_id": [
83
+ "TinyBench/videommmu_paper.pdf"
84
+ ],
85
+ "file_type": "paper pdf",
86
+ "question": "What is the SOTA model performance on Video-MMMU dataset",
87
+ "question_type": "reasoning",
88
+ "evidence_type": "table",
89
+ "answer": "65.78%, Claude-3.5-Sonnet",
90
+ "content_domain": "Academic paper",
91
+ "Comment": ""
92
+ },
93
+ {
94
+ "id": "8",
95
+ "doc_id": [
96
+ "TinyBench/videommmu_paper.pdf"
97
+ ],
98
+ "file_type": "paper pdf",
99
+ "question": "What is GPT-4o's Overall score on video-mmmu?",
100
+ "question_type": "fact",
101
+ "evidence_type": "table",
102
+ "answer": "61.22",
103
+ "content_domain": "Academic paper",
104
+ "Comment": ""
105
+ },
106
+ {
107
+ "id": "9",
108
+ "doc_id": [
109
+ "TinyBench/videommmu_paper.pdf"
110
+ ],
111
+ "file_type": "paper pdf",
112
+ "question": "How much of the error in video-mmmu is question misreading error?",
113
+ "question_type": "fact",
114
+ "evidence_type": "text",
115
+ "answer": "15%",
116
+ "content_domain": "Academic paper",
117
+ "Comment": ""
118
+ },
119
+ {
120
+ "id": "10",
121
+ "doc_id": [
122
+ "TinyBench/videommmu_paper.pdf"
123
+ ],
124
+ "file_type": "paper pdf",
125
+ "question": "Explain the error analysis in video-mmmu?",
126
+ "question_type": "summary",
127
+ "evidence_type": "text",
128
+ "answer": "Method Selection Error (8%): The model chooses the wrong approach, failing to apply the correct strategy demonstrated in the video. Method Adaptation Error (64%): The model recalls and understands the video-taught method but struggles to adapt it to new scenarios. For example, it correctly applies DFS in a simple tree but fails in a complex graph with cycles, highlighting its difficulty in transferring learned methods across contexts. Question Misreading Error (15%): The model misinterprets question details, such as numerical values or conditions, unrelated to its knowledge application. Other Errors: Includes Refuse to Answer (4%), where the model expresses uncertainty; Annotation Error (4%), due to inaccurate labeling; and Answer Extraction Error (5%), where answers fail to be extracted from longer responses.",
129
+ "content_domain": "Academic paper",
130
+ "Comment": ""
131
+ },
132
+ {
133
+ "id": "11",
134
+ "doc_id": [
135
+ "TinyBench/videommmu_paper.pdf"
136
+ ],
137
+ "file_type": "paper pdf",
138
+ "question": "How much percentage of video-mmmu error is Refuse to answer error?",
139
+ "question_type": "fact",
140
+ "evidence_type": "text",
141
+ "answer": "4%",
142
+ "content_domain": "Academic paper",
143
+ "Comment": ""
144
+ },
145
+ {
146
+ "id": "12",
147
+ "doc_id": [
148
+ "TinyBench/videommmu_paper.pdf"
149
+ ],
150
+ "file_type": "paper pdf",
151
+ "question": "How much video-mmmu error is Annotation error?",
152
+ "question_type": "fact",
153
+ "evidence_type": "text",
154
+ "answer": "4%",
155
+ "content_domain": "Academic paper",
156
+ "Comment": ""
157
+ },
158
+ {
159
+ "id": "13",
160
+ "doc_id": [
161
+ "TinyBench/videommmu_paper.pdf"
162
+ ],
163
+ "file_type": "paper pdf",
164
+ "question": "How much video-mmmu error is Answer Extraction error?",
165
+ "question_type": "fact",
166
+ "evidence_type": "text",
167
+ "answer": "5%",
168
+ "content_domain": "Academic paper",
169
+ "Comment": ""
170
+ },
171
+ {
172
+ "id": "15",
173
+ "doc_id": [
174
+ "TinyBench/videommmu_paper.pdf"
175
+ ],
176
+ "file_type": "paper pdf",
177
+ "question": "What is Aria's Overall score with transcript on video-mmmu?",
178
+ "question_type": "fact",
179
+ "evidence_type": "figure",
180
+ "answer": "53.67%",
181
+ "content_domain": "Academic paper",
182
+ "Comment": ""
183
+ },
184
+ {
185
+ "id": "16",
186
+ "doc_id": [
187
+ "TinyBench/videommmu_paper.pdf"
188
+ ],
189
+ "file_type": "paper pdf",
190
+ "question": "Explain the example of DFS to help me understand the method adaptation error.",
191
+ "question_type": "summary",
192
+ "evidence_type": "figure",
193
+ "answer": "The video teaches DFS principles, but the adaptation question applies them to a complex graph with cycles. Before the video, both Claude and Humans misfocused on cycles. Afterward, both grasped the core principle, but Claude failed to adapt it correctly, while Humans successfully applied it. This highlights the difficulty of method adaptation in new scenarios.",
194
+ "content_domain": "Academic paper",
195
+ "Comment": ""
196
+ },
197
+ {
198
+ "id": "17",
199
+ "doc_id": [
200
+ "TinyBench/videommmu_paper.pdf"
201
+ ],
202
+ "file_type": "paper pdf",
203
+ "question": "How many video-mmmu Questions are ASR?",
204
+ "question_type": "fact",
205
+ "evidence_type": "figure",
206
+ "answer": "23",
207
+ "content_domain": "Academic paper",
208
+ "Comment": ""
209
+ },
210
+ {
211
+ "id": "18",
212
+ "doc_id": [
213
+ "TinyBench/videommmu_paper.pdf"
214
+ ],
215
+ "file_type": "paper pdf",
216
+ "question": "What is Question Misreading error? Can you explain with an example case of GPT-4o?",
217
+ "question_type": "summary",
218
+ "evidence_type": "figure",
219
+ "answer": "The video explains how to determine the work function (∅) in Photoelectric Effect Graphs by identifying the y-intercept, eliminating the need for formulas. Before the video: • Both humans and the model relied on formulas, leading to incorrect answers. After the video: • The model correctly recognized the y-intercept method but misread the graph, identifying -2.0 instead of -1.5 due to a mistaken x-intercept assumption. • Humans accurately identified the y-intercept and found the correct answer (1.5). This case illustrates a Question Misreading Error by GPT-4o, where it applied the right method but misinterpreted the graph.",
220
+ "content_domain": "Academic paper",
221
+ "Comment": ""
222
+ },
223
+ {
224
+ "id": "19",
225
+ "doc_id": [
226
+ "TinyBench/videommmu_paper.pdf"
227
+ ],
228
+ "file_type": "paper pdf",
229
+ "question": "Can you explain the wrong-to-right example of the video lecture '2-3 tree' in Video-MMMU?",
230
+ "question_type": "summary",
231
+ "evidence_type": "figure",
232
+ "answer": "This case shows the model learning from a 2-3 tree lecture to correct its misunderstanding of insertion and reorganization. The video explains node insertion cases and restructuring rules, which the adaptation question tests. Before the video, the model: • Misjudged insertion effects on the root • Misunderstood reorganization rules • Incorrectly identified only S4 as true After the video, the model: • Recognized node splits and reorganization • Applied Case 2 principles correctly • Identified S1 and S4 as true This demonstrates successful knowledge acquisition, as the model corrected its understanding and applied the learned principles accurately.",
233
+ "content_domain": "Academic paper",
234
+ "Comment": ""
235
+ },
236
+ {
237
+ "id": "20",
238
+ "doc_id": [
239
+ "TinyBench/videommmu_paper.pdf"
240
+ ],
241
+ "file_type": "paper pdf",
242
+ "question": "What is the number of Video-MMMU questions where audio might help?",
243
+ "question_type": "reasoning",
244
+ "evidence_type": "figure",
245
+ "answer": "Art: 16, Business: 28, Medicine: 32, Science: 23, Humanities: 26, Engineering: 29. Total = 154.",
246
+ "content_domain": "Academic paper",
247
+ "Comment": ""
248
+ },
249
+ {
250
+ "id": "21",
251
+ "doc_id": [
252
+ "TinyBench/videommmu_paper.pdf"
253
+ ],
254
+ "file_type": "paper pdf",
255
+ "question": "How many questions are in the Comprehension track of Video-MMMU?",
256
+ "question_type": "reasoning",
257
+ "evidence_type": "figure",
258
+ "answer": "300",
259
+ "content_domain": "Academic paper",
260
+ "Comment": ""
261
+ },
262
+ {
263
+ "id": "22",
264
+ "doc_id": [
265
+ "TinyBench/videommmu_paper.pdf"
266
+ ],
267
+ "file_type": "paper pdf",
268
+ "question": "How many questions are in the Art discipline of Video-MMMU?",
269
+ "question_type": "reasoning",
270
+ "evidence_type": "figure + Multi-page",
271
+ "answer": "Video: 7% * 300 = 21. Each video has 3 questions from 3 tracks. So 21 * 3 = 63.",
272
+ "content_domain": "Academic paper",
273
+ "Comment": ""
274
+ },
275
+ {
276
+ "id": "23",
277
+ "doc_id": [
278
+ "TinyBench/videommmu_paper.pdf"
279
+ ],
280
+ "file_type": "paper pdf",
281
+ "question": "What is the performance gap between Human and model in the main experiment of Video-MMMU?",
282
+ "question_type": "reasoning",
283
+ "evidence_type": "table",
284
+ "answer": "74.44-65.78=8.66",
285
+ "content_domain": "Academic paper",
286
+ "Comment": ""
287
+ },
288
+ {
289
+ "id": "31",
290
+ "doc_id": [
291
+ "TinyBench/videommmu_paper.pdf"
292
+ ],
293
+ "file_type": "paper pdf",
294
+ "question": "Which open-source model performs best on the Adaptation track and how does it compare to the worst-performing open-source model?",
295
+ "question_type": "reasoning",
296
+ "evidence_type": "table + Multi-row",
297
+ "answer": "LLaVA-Video-72B performs the best among open-source models on the Adaptation track with an accuracy of 43.33%. The worst-performing open-source model is InternVL2-8B with an accuracy of 31.67%. The difference in performance is 11.66 percentage points.",
298
+ "content_domain": "Academic paper",
299
+ "Comment": ""
300
+ },
301
+ {
302
+ "id": "32",
303
+ "doc_id": [
304
+ "TinyBench/videommmu_paper.pdf"
305
+ ],
306
+ "file_type": "paper pdf",
307
+ "question": "Which discipline shows the largest performance gap between Human Experts and Claude-3.5-Sonnet?",
308
+ "question_type": "reasoning",
309
+ "evidence_type": "table + Multi-row",
310
+ "answer": "The largest performance gap is in the Medicine discipline: Human Experts scored 70.54%, while Claude-3.5-Sonnet scored 58.14%, resulting in a 12.4 percentage point gap.",
311
+ "content_domain": "Academic paper",
312
+ "Comment": ""
313
+ },
314
+ {
315
+ "id": "33",
316
+ "doc_id": [
317
+ "TinyBench/videommmu_paper.pdf"
318
+ ],
319
+ "file_type": "paper pdf",
320
+ "question": "Which proprietary model achieves the highest Perception score, and how much higher is it than the top open-source model?",
321
+ "question_type": "reasoning",
322
+ "evidence_type": "table + Multi-row",
323
+ "answer": "Claude-3.5-Sonnet achieves the highest Perception score among proprietary models with 72.00%. The top open-source model in Perception is Aria with 65.67%. Claude outperforms Aria by 6.33 percentage points.",
324
+ "content_domain": "Academic paper",
325
+ "Comment": ""
326
+ },
327
+ {
328
+ "id": "34",
329
+ "doc_id": [
330
+ "TinyBench/videommmu_paper.pdf"
331
+ ],
332
+ "file_type": "paper pdf",
333
+ "question": "Which model demonstrates the highest Wrong-to-Right Rate and what is its corresponding Delta Knowledge value?",
334
+ "question_type": "reasoning",
335
+ "evidence_type": "table + Multi-row",
336
+ "answer": "Claude-3.5-Sonnet has a Wrong-to-Right Rate of 28.8% and a ∆knowledge value of 11.4%.",
337
+ "content_domain": "Academic paper",
338
+ "Comment": ""
339
+ },
340
+ {
341
+ "id": "35",
342
+ "doc_id": [
343
+ "TinyBench/videommmu_paper.pdf"
344
+ ],
345
+ "file_type": "paper pdf",
346
+ "question": "Which model shows a negative Delta Knowledge score and also has the highest Right-to-Wrong Rate?",
347
+ "question_type": "reasoning",
348
+ "evidence_type": "table + Multi-row",
349
+ "answer": "InternVL2-8B has the lowest ∆knowledge score of -8.5% and the highest Right-to-Wrong Rate at 55.0%.",
350
+ "content_domain": "Academic paper",
351
+ "Comment": ""
352
+ },
353
+ {
354
+ "id": "37",
355
+ "doc_id": [
356
+ "TinyBench/videommmu_paper.pdf"
357
+ ],
358
+ "file_type": "paper pdf",
359
+ "question": "What are the formulas for Wrong-to-Right Rate and Right-to-Wrong Rate?",
360
+ "question_type": "fact",
361
+ "evidence_type": "formula + Multi-chunk",
362
+ "answer": "Wrong-to-Right Rate = (N_Wrong-to-Right / N_Wrong-before) × 100%. Right-to-Wrong Rate = (N_Right-to-Wrong / N_Right-before) × 100%. These are defined in two separate paragraphs when discussing model response changes after watching videos.",
363
+ "content_domain": "Academic paper",
364
+ "Comment": ""
365
+ },
366
+ {
367
+ "id": "38",
368
+ "doc_id": [
369
+ "TinyBench/videommmu_paper.pdf"
370
+ ],
371
+ "file_type": "paper pdf",
372
+ "question": "What are the three main types of errors observed in Claude-3.5-Sonnet on the Adaptation track and their respective percentages?",
373
+ "question_type": "fact",
374
+ "evidence_type": "text + Multi-chunk",
375
+ "answer": "The three main types of errors are: Method Adaptation Error (64%), Question Misreading (15%), and Method Selection Error (8%). This is summarized in Figure 7 and explained in detail in adjacent paragraphs.",
376
+ "content_domain": "Academic paper",
377
+ "Comment": ""
378
+ },
379
+ {
380
+ "id": "39",
381
+ "doc_id": [
382
+ "TinyBench/videommmu_paper.pdf"
383
+ ],
384
+ "file_type": "paper pdf",
385
+ "question": "How are Adaptation track questions evaluated and what inputs are provided to models?",
386
+ "question_type": "fact",
387
+ "evidence_type": "text + Multi-chunk",
388
+ "answer": "The input includes the full video and a final frame with the question's image appended at the end. A special prompt is added indicating this setup, as shown in Figure 8 and the paragraph explaining inputs in Section 4.1. Prompt: System Message: As an AI assistant, you should watch and learn from the video. Then, adapt what you learned to answer the following question. The image for this question is at the end of the video. Question: [Question Text] Options: A) [Option A] B) [Option B] [etc.]",
389
+ "content_domain": "Academic paper",
390
+ "Comment": ""
391
+ },
392
+ {
393
+ "id": "40",
394
+ "doc_id": [
395
+ "TinyBench/videommmu_paper.pdf"
396
+ ],
397
+ "file_type": "paper pdf",
398
+ "question": "What are the two video types in Video-MMMU and how do they differ?",
399
+ "question_type": "fact",
400
+ "evidence_type": "text + Multi-chunk",
401
+ "answer": "Video-MMMU includes two types of videos: Concept-Introduction and Problem-Solving. Concept videos focus on explaining theories and facts, while Problem-Solving videos demonstrate step-by-step solutions. This is explained in the text and visually illustrated in Figure 2.",
402
+ "content_domain": "Academic paper",
403
+ "Comment": ""
404
+ },
405
+ {
406
+ "id": "41",
407
+ "doc_id": [
408
+ "TinyBench/videommmu_paper.pdf"
409
+ ],
410
+ "file_type": "paper pdf",
411
+ "question": "What are the three cognitive tracks in Video-MMMU and how are they visually illustrated?",
412
+ "question_type": "fact",
413
+ "evidence_type": "text + Multi-page",
414
+ "answer": "The three tracks are Perception (extracting key information), Comprehension (understanding underlying concepts), and Adaptation (applying knowledge to new problems). They are introduced on page 1 and visually illustrated in Figure 1 on page 3, where each stage is associated with a different model behavior.",
415
+ "content_domain": "Academic paper",
416
+ "Comment": ""
417
+ },
418
+ {
419
+ "id": "42",
420
+ "doc_id": [
421
+ "TinyBench/videommmu_paper.pdf"
422
+ ],
423
+ "file_type": "paper pdf",
424
+ "question": "Which model shows the highest Delta Knowledge score and what does this metric represent?",
425
+ "question_type": "reasoning",
426
+ "evidence_type": "table + Multi-page",
427
+ "answer": "GPT-4o shows the highest ∆knowledge score among models with 15.6%. The ∆knowledge metric represents the normalized performance improvement in the Adaptation track after watching a video, as defined on page 7.",
428
+ "content_domain": "Academic paper",
429
+ "Comment": ""
430
+ },
431
+ {
432
+ "id": "43",
433
+ "doc_id": [
434
+ "TinyBench/videommmu_paper.pdf"
435
+ ],
436
+ "file_type": "paper pdf",
437
+ "question": "What does a Method Adaptation Error look like in the Adaptation track, how common is it, and which figure illustrates an example?",
438
+ "question_type": "reasoning",
439
+ "evidence_type": "text + Multi-page",
440
+ "answer": "A Method Adaptation Error occurs when a model recalls the correct method from the video but fails to apply it to a new scenario. Figure 6 shows an example with DFS intervals, while page 8 states that Method Adaptation accounts for 64% of Claude-3.5-Sonnet’s errors.",
441
+ "content_domain": "Academic paper",
442
+ "Comment": ""
443
+ },
444
+ {
445
+ "id": "44",
446
+ "doc_id": [
447
+ "TinyBench/videommmu_paper.pdf"
448
+ ],
449
+ "file_type": "paper pdf",
450
+ "question": "What is the difference between Concept Comprehension and Problem-solving Strategy Comprehension in the Comprehension track?",
451
+ "question_type": "reasoning",
452
+ "evidence_type": "text + Multi-chunk",
453
+ "answer": "Page 3 introduces the taxonomy where Concept Comprehension (CC) evaluates understanding of statements, often using multiple-answer formats, while Problem-solving Strategy Comprehension (PSC) changes inputs in example questions to test generalization.",
454
+ "content_domain": "Academic paper",
455
+ "Comment": ""
456
+ },
457
+ {
458
+ "id": "45",
459
+ "doc_id": [
460
+ "TinyBench/videommmu_paper.pdf"
461
+ ],
462
+ "file_type": "paper pdf",
463
+ "question": "Give me an example of a question that is a good fit for the Problem-solving Strategy Comprehension track.",
464
+ "question_type": "reasoning",
465
+ "evidence_type": "figure + Multi-row",
466
+ "answer": "An example question is: 'In the video, Example Question (1) is solved with an angle θ=25 degrees. If the angle θ is adjusted to 30 degrees while all other conditions remain unchanged, what will be the updated result for Example Question (1) as explained in the video?' This question appears in the Science domain and tests whether the model comprehends and can apply the same problem-solving strategy demonstrated in the video.",
467
+ "content_domain": "Academic paper",
468
+ "Comment": ""
469
+ },
470
+ {
471
+ "id": "46",
472
+ "doc_id": [
473
+ "TinyBench/videommmu_paper.pdf"
474
+ ],
475
+ "file_type": "paper pdf",
476
+ "question": "How many proprietary and open-source models are evaluated in Video-MMMU?",
477
+ "question_type": "fact",
478
+ "evidence_type": "text + Multi-row",
479
+ "answer": "Video-MMMU evaluates 4 proprietary LMMs (Gemini 1.5 Flash, Gemini 1.5 Pro, GPT-4o, Claude-3.5-Sonnet) and 11 open-source LMMs (VILA1.5-8B, LongVA-7B, Llama-3.2-11B, LLaVA-OneVision-7B, VILA1.5-40B, LLaVA-Video-7B, InternVL2-8B, MAmmoTH-VL-8B, LLaVA-OneVision-72B, LLaVA-Video-72B, Aria).",
480
+ "content_domain": "Academic paper",
481
+ "Comment": ""
482
+ }
483
+ ]
file_counts.json ADDED
File without changes