yecanming committed on
Commit
35aa491
·
1 Parent(s): 272af26

feat: data, code, readme

Browse files
.gitattributes CHANGED
@@ -1,59 +1,89 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
  *.model filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
  *.onnx filter=lfs diff=lfs merge=lfs -text
19
  *.ot filter=lfs diff=lfs merge=lfs -text
20
  *.parquet filter=lfs diff=lfs merge=lfs -text
21
  *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
  *.pt filter=lfs diff=lfs merge=lfs -text
25
  *.pth filter=lfs diff=lfs merge=lfs -text
26
  *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
  *.tar filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
31
  *.tflite filter=lfs diff=lfs merge=lfs -text
32
  *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
  *.xz filter=lfs diff=lfs merge=lfs -text
35
  *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
  *.jpg filter=lfs diff=lfs merge=lfs -text
 
55
  *.jpeg filter=lfs diff=lfs merge=lfs -text
 
 
56
  *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
 
 
 
 
 
 
 
 
 
 
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
 
 
11
  *.model filter=lfs diff=lfs merge=lfs -text
12
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
13
  *.onnx filter=lfs diff=lfs merge=lfs -text
14
  *.ot filter=lfs diff=lfs merge=lfs -text
15
  *.parquet filter=lfs diff=lfs merge=lfs -text
16
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
17
  *.pt filter=lfs diff=lfs merge=lfs -text
18
  *.pth filter=lfs diff=lfs merge=lfs -text
19
  *.rar filter=lfs diff=lfs merge=lfs -text
 
20
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 
21
  *.tar filter=lfs diff=lfs merge=lfs -text
22
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
23
+ *.mat filter=lfs diff=lfs merge=lfs -text
24
+ *.npz filter=lfs diff=lfs merge=lfs -text
25
+ *.npy filter=lfs diff=lfs merge=lfs -text
26
+ *.h5 filter=lfs diff=lfs merge=lfs -text
27
+ *.hdf5 filter=lfs diff=lfs merge=lfs -text
28
+ *.pickle filter=lfs diff=lfs merge=lfs -text
29
+ *.pkl filter=lfs diff=lfs merge=lfs -text
30
  *.tflite filter=lfs diff=lfs merge=lfs -text
31
  *.tgz filter=lfs diff=lfs merge=lfs -text
 
32
  *.xz filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
35
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.db* filter=lfs diff=lfs merge=lfs -text
37
+ *.ark* filter=lfs diff=lfs merge=lfs -text
38
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
39
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
40
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
41
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
42
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
43
  *.jpg filter=lfs diff=lfs merge=lfs -text
44
+ *.png filter=lfs diff=lfs merge=lfs -text
45
  *.jpeg filter=lfs diff=lfs merge=lfs -text
46
+ *.bmp filter=lfs diff=lfs merge=lfs -text
47
+ *.gif filter=lfs diff=lfs merge=lfs -text
48
  *.webp filter=lfs diff=lfs merge=lfs -text
49
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
50
+ *.wav filter=lfs diff=lfs merge=lfs -text
51
+ *.wma filter=lfs diff=lfs merge=lfs -text
52
+ *.aac filter=lfs diff=lfs merge=lfs -text
53
+ *.ogg filter=lfs diff=lfs merge=lfs -text
54
+ *.m4a filter=lfs diff=lfs merge=lfs -text
55
+ *.m3u8 filter=lfs diff=lfs merge=lfs -text
56
+ *.amr filter=lfs diff=lfs merge=lfs -text
57
+ *.audio filter=lfs diff=lfs merge=lfs -text
58
+ *.avi filter=lfs diff=lfs merge=lfs -text
59
+ *.flv filter=lfs diff=lfs merge=lfs -text
60
  *.mp4 filter=lfs diff=lfs merge=lfs -text
61
+ *.mpg filter=lfs diff=lfs merge=lfs -text
62
+ *.asf filter=lfs diff=lfs merge=lfs -text
63
+ *.mov filter=lfs diff=lfs merge=lfs -text
64
+ *.mpeg filter=lfs diff=lfs merge=lfs -text
65
+ *.3gp filter=lfs diff=lfs merge=lfs -text
66
+ *.wmv filter=lfs diff=lfs merge=lfs -text
67
+ *.rmvb filter=lfs diff=lfs merge=lfs -text
68
+ *.rm filter=lfs diff=lfs merge=lfs -text
69
+ *.ts filter=lfs diff=lfs merge=lfs -text
70
+ *.mkv filter=lfs diff=lfs merge=lfs -text
71
+ *.flash filter=lfs diff=lfs merge=lfs -text
72
+ *.vob filter=lfs diff=lfs merge=lfs -text
73
+ *.pdf filter=lfs diff=lfs merge=lfs -text
74
+ *.ost filter=lfs diff=lfs merge=lfs -text
75
+ *.pst filter=lfs diff=lfs merge=lfs -text
76
+ *.doc filter=lfs diff=lfs merge=lfs -text
77
+ *.docx filter=lfs diff=lfs merge=lfs -text
78
+ *.txt filter=lfs diff=lfs merge=lfs -text
79
+ *.ppt filter=lfs diff=lfs merge=lfs -text
80
+ *.pptx filter=lfs diff=lfs merge=lfs -text
81
+ *.xls filter=lfs diff=lfs merge=lfs -text
82
+ *.xlsx filter=lfs diff=lfs merge=lfs -text
83
+ *.vsd filter=lfs diff=lfs merge=lfs -text
84
+ *.vsdx filter=lfs diff=lfs merge=lfs -text
85
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
86
+ *.json filter=lfs diff=lfs merge=lfs -text
87
+ dataset_infos.json ignore
88
+ *.csv filter=lfs diff=lfs merge=lfs -text
89
+ *.tsv filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-sa-4.0
3
+ annotations_creators:
4
+ - expert-generated
5
+ - found
6
+ language:
7
+ - en
8
+ language_creators:
9
+ - expert-generated
10
+ - found
11
+ multilinguality:
12
+ - monolingual
13
+ paperswithcode_id: scienceqa
14
+ pretty_name: ScienceQA
15
+ size_categories:
16
+ - 10K<n<100K
17
+ source_datasets:
18
+ - original
19
+ tags:
20
+ - multi-modal-qa
21
+ - science
22
+ - chemistry
23
+ - biology
24
+ - physics
25
+ - earth-science
26
+ - engineering
27
+ - geography
28
+ - history
29
+ - world-history
30
+ - civics
31
+ - economics
32
+ - global-studies
33
+ - grammar
34
+ - writing
35
+ - vocabulary
36
+ - natural-science
37
+ - language-science
38
+ - social-science
39
+ task_categories:
40
+ - multiple-choice
41
+ - question-answering
42
+ - other
43
+ - visual-question-answering
44
+ - text-classification
45
+ task_ids:
46
+ - multiple-choice-qa
47
+ - closed-domain-qa
48
+ - open-domain-qa
49
+ - visual-question-answering
50
+ - multi-class-classification
51
+ dataset_info:
52
+ features:
53
+ - name: image
54
+ dtype: str
55
+ - name: question
56
+ dtype: string
57
+ - name: choices
58
+ sequence: string
59
+ - name: answer
60
+ dtype: string
61
+ - name: hint
62
+ dtype: string
63
+ - name: task
64
+ dtype: string
65
+ - name: grade
66
+ dtype: string
67
+ - name: subject
68
+ dtype: string
69
+ - name: topic
70
+ dtype: string
71
+ - name: category
72
+ dtype: string
73
+ - name: skill
74
+ dtype: string
75
+ - name: lecture
76
+ dtype: string
77
+ - name: solution
78
+ dtype: string
79
+ - name: split
80
+ dtype: string
81
+ - name: index
82
+ dtype: int64
83
+ - name: image
84
+ dtype: string
85
+ - name: A
86
+ dtype: string
87
+ - name: B
88
+ dtype: string
89
+ - name: C
90
+ dtype: string
91
+ - name: D
92
+ dtype: string
93
+ - name: E
94
+ dtype: string
95
+ splits:
96
+ - name: train
97
+ num_bytes: 16416902
98
+ num_examples: 12726
99
+ - name: validation
100
+ num_bytes: 5404896
101
+ num_examples: 4241
102
+ - name: test
103
+ num_bytes: 5441676
104
+ num_examples: 4241
105
+ download_size: 0
106
+ dataset_size: 27263474
107
+ ---
108
+ # This is a reformatted version of the ScienceQA dataset for VLMEvalKit.
109
+
110
+ Below is the original description of ScienceQA dataset:
111
+
112
+ # Dataset Card Creation Guide
113
+
114
+ ## Table of Contents
115
+ - [Dataset Card Creation Guide](#dataset-card-creation-guide)
116
+ - [Table of Contents](#table-of-contents)
117
+ - [Dataset Description](#dataset-description)
118
+ - [Dataset Summary](#dataset-summary)
119
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
120
+ - [Languages](#languages)
121
+ - [Dataset Structure](#dataset-structure)
122
+ - [Data Instances](#data-instances)
123
+ - [Data Fields](#data-fields)
124
+ - [Data Splits](#data-splits)
125
+ - [Dataset Creation](#dataset-creation)
126
+ - [Curation Rationale](#curation-rationale)
127
+ - [Source Data](#source-data)
128
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
129
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
130
+ - [Annotations](#annotations)
131
+ - [Annotation process](#annotation-process)
132
+ - [Who are the annotators?](#who-are-the-annotators)
133
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
134
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
135
+ - [Social Impact of Dataset](#social-impact-of-dataset)
136
+ - [Discussion of Biases](#discussion-of-biases)
137
+ - [Other Known Limitations](#other-known-limitations)
138
+ - [Additional Information](#additional-information)
139
+ - [Dataset Curators](#dataset-curators)
140
+ - [Licensing Information](#licensing-information)
141
+ - [Citation Information](#citation-information)
142
+ - [Contributions](#contributions)
143
+
144
+ ## Dataset Description
145
+
146
+ - **Homepage:** [https://scienceqa.github.io/index.html#home](https://scienceqa.github.io/index.html#home)
147
+ - **Repository:** [https://github.com/lupantech/ScienceQA](https://github.com/lupantech/ScienceQA)
148
+ - **Paper:** [https://arxiv.org/abs/2209.09513](https://arxiv.org/abs/2209.09513)
149
+ - **Leaderboard:** [https://paperswithcode.com/dataset/scienceqa](https://paperswithcode.com/dataset/scienceqa)
150
+ - **Point of Contact:** [Pan Lu](https://lupantech.github.io/) or file an issue on [Github](https://github.com/lupantech/ScienceQA/issues)
151
+
152
+ ### Dataset Summary
153
+
154
+ Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering
155
+
156
+ ### Supported Tasks and Leaderboards
157
+
158
+ Multi-modal Multiple Choice
159
+
160
+ ### Languages
161
+
162
+ English
163
+
164
+ ## Dataset Structure
165
+
166
+ ### Data Instances
167
+
168
+ Explore more samples [here](https://scienceqa.github.io/explore.html).
169
+
170
+ ``` json
171
+ {'image': Image,
172
+ 'question': 'Which of these states is farthest north?',
173
+ 'choices': ['West Virginia', 'Louisiana', 'Arizona', 'Oklahoma'],
174
+ 'answer': 0,
175
+ 'hint': '',
176
+ 'task': 'closed choice',
177
+ 'grade': 'grade2',
178
+ 'subject': 'social science',
179
+ 'topic': 'geography',
180
+ 'category': 'Geography',
181
+ 'skill': 'Read a map: cardinal directions',
182
+ 'lecture': 'Maps have four cardinal directions, or main directions. Those directions are north, south, east, and west.\nA compass rose is a set of arrows that point to the cardinal directions. A compass rose usually shows only the first letter of each cardinal direction.\nThe north arrow points to the North Pole. On most maps, north is at the top of the map.',
183
+ 'solution': 'To find the answer, look at the compass rose. Look at which way the north arrow is pointing. West Virginia is farthest north.'}
184
+ ```
185
+
186
+ Some records might be missing any or all of image, lecture, solution.
187
+
188
+ ### Data Fields
189
+
190
+ - `image` : Contextual image
191
+ - `question` : Prompt relating to the `lecture`
192
+ - `choices` : Multiple choice answer with 1 correct to the `question`
193
+ - `answer` : Index of choices corresponding to the correct answer
194
+ - `hint` : Hint to help answer the `question`
195
+ - `task` : Task description
196
+ - `grade` : Grade level from K-12
197
+ - `subject` : High level
198
+ - `topic` : natural-sciences, social-science, or language-science
199
+ - `category` : A subcategory of `topic`
200
+ - `skill` : A description of the task required
201
+ - `lecture` : A relevant lecture that a `question` is generated from
202
+ - `solution` : Instructions on how to solve the `question`
203
+
204
+
205
+ Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging), you will then only need to refine the generated descriptions.
206
+
207
+ ### Data Splits
208
+ - name: train
209
+ - num_bytes: 16416902
210
+ - num_examples: 12726
211
+ - name: validation
212
+ - num_bytes: 5404896
213
+ - num_examples: 4241
214
+ - name: test
215
+ - num_bytes: 5441676
216
+ - num_examples: 4241
217
+
218
+ ## Dataset Creation
219
+
220
+ ### Curation Rationale
221
+
222
+ When answering a question, humans utilize the information available across different modalities to synthesize a consistent and complete chain of thought (CoT). This process is normally a black box in the case of deep learning models like large-scale language models. Recently, science question benchmarks have been used to diagnose the multi-hop reasoning ability and interpretability of an AI system. However, existing datasets fail to provide annotations for the answers, or are restricted to the textual-only modality, small scales, and limited domain diversity. To this end, we present Science Question Answering (ScienceQA).
223
+
224
+ ### Source Data
225
+
226
+ ScienceQA is collected from elementary and high school science curricula.
227
+
228
+ #### Initial Data Collection and Normalization
229
+
230
+ See Below
231
+
232
+ #### Who are the source language producers?
233
+
234
+ See Below
235
+
236
+ ### Annotations
237
+
238
+ Questions in the ScienceQA dataset are sourced from open resources managed by IXL Learning,
239
+ an online learning platform curated by experts in the field of K-12 education. The dataset includes
240
+ problems that align with California Common Core Content Standards. To construct ScienceQA, we
241
+ downloaded the original science problems and then extracted individual components (e.g. questions,
242
+ hints, images, options, answers, lectures, and solutions) from them based on heuristic rules.
243
+ We manually removed invalid questions, such as questions that have only one choice, questions that
244
+ contain faulty data, and questions that are duplicated, to comply with fair use and transformative
245
+ use of the law. If there were multiple correct answers that applied, we kept only one correct answer.
246
+ Also, we shuffled the answer options of each question to ensure the choices do not follow any
247
+ specific pattern. To make the dataset easy to use, we then used semi-automated scripts to reformat
248
+ the lectures and solutions. Therefore, special structures in the texts, such as tables and lists, are
249
+ easily distinguishable from simple text passages. Similar to ImageNet, ReClor, and PMR datasets,
250
+ ScienceQA is available for non-commercial research purposes only and the copyright belongs to
251
+ the original authors. To ensure data quality, we developed a data exploration tool to review examples
252
+ in the collected dataset, and incorrect annotations were further manually revised by experts. The tool
253
+ can be accessed at https://scienceqa.github.io/explore.html.
254
+
255
+ #### Annotation process
256
+
257
+ See above
258
+
259
+ #### Who are the annotators?
260
+
261
+ See above
262
+
263
+ ### Personal and Sensitive Information
264
+
265
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
266
+
267
+ ## Considerations for Using the Data
268
+
269
+ ### Social Impact of Dataset
270
+
271
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
272
+
273
+ ### Discussion of Biases
274
+
275
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
276
+
277
+ ### Other Known Limitations
278
+
279
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
280
+
281
+ ## Additional Information
282
+
283
+ ### Dataset Curators
284
+
285
+ - Pan Lu1,3
286
+ - Swaroop Mishra2,3
287
+ - Tony Xia1
288
+ - Liang Qiu1
289
+ - Kai-Wei Chang1
290
+ - Song-Chun Zhu1
291
+ - Oyvind Tafjord3
292
+ - Peter Clark3
293
+ - Ashwin Kalyan3
294
+
295
+ From:
296
+ 1. University of California, Los Angeles
297
+ 2. Arizona State University
298
+ 3. Allen Institute for AI
299
+
300
+
301
+
302
+ ### Licensing Information
303
+
304
+ [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
305
+ ](https://creativecommons.org/licenses/by-nc-sa/4.0/)
306
+
307
+ ### Citation Information
308
+
309
+ The [BibTeX](http://www.bibtex.org/)-formatted reference for this dataset:
310
+ ```
311
+ @inproceedings{lu2022learn,
312
+ title={Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering},
313
+ author={Lu, Pan and Mishra, Swaroop and Xia, Tony and Qiu, Liang and Chang, Kai-Wei and Zhu, Song-Chun and Tafjord, Oyvind and Clark, Peter and Kalyan, Ashwin},
314
+ booktitle={The 36th Conference on Neural Information Processing Systems (NeurIPS)},
315
+ year={2022}
316
+ }
317
+ ```
318
+ ### Contributions
319
+
320
+ Thanks to [Derek Thomas](https://huggingface.co/derek-thomas) [@datavistics](https://github.com/datavistics) for adding this dataset.
data/ScienceQA_test-fixed.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1838831840f4a29b13bae951a2066e14053a32204138a40e1a6757b5aff1aa99
3
+ size 183908531
data/ScienceQA_validation-fixed.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b41884e595015cef5bfa06f7e88508f1ef5b761ec4939629cbab45eb119ed0e6
3
+ size 186421762
fix_and_export.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
info-default-validation.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0596c9fa6f3d81a3a2f625deb7661b847ead7d85860902fd2045333adfba12a
3
+ size 3388