SaulLu committed on
Commit
942378c
·
1 Parent(s): 3652be9

add working loading script

Browse files
Files changed (1) hide show
  1. AdVQA.py +173 -0
AdVQA.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """adVQA loading script."""
15
+
16
+
17
+ import csv
18
+ import json
19
+ import os
20
+ from pathlib import Path
21
+
22
+ import datasets
23
+
24
+
25
+ _CITATION = """\
26
+ @InProceedings{sheng2021human,
27
+ author = {Sheng, Sasha and Singh, Amanpreet and Goswami, Vedanuj and Magana, Jose Alberto Lopez and Galuba, Wojciech and Parikh, Devi and Kiela, Douwe},
28
+ title = {Human-Adversarial Visual Question Answering},
29
+ journal={arXiv preprint arXiv:2106.02280},
30
+ year = {2021},
31
+ }
32
+ """
33
+
34
+ _DESCRIPTION = """\
35
+ This is v1.0 of the ADVQA dataset.
36
+ """
37
+
38
+ _HOMEPAGE = "https://adversarialvqa.org"
39
+
40
+ _LICENSE = "CC BY-NC 4.0" # In json file
41
+
42
+ _URLS = {
43
+ "questions": {
44
+ "val": "https://dl.fbaipublicfiles.com/advqa/v1_OpenEnded_mscoco_val2017_advqa_questions.json",
45
+ "test-dev": "https://dl.fbaipublicfiles.com/advqa/v1_OpenEnded_mscoco_testdev2015_advqa_questions.json",
46
+ },
47
+ "annotations": {
48
+ "val": "https://dl.fbaipublicfiles.com/advqa/v1_mscoco_val2017_advqa_annotations.json",
49
+ },
50
+ "images": {
51
+ "val": "http://images.cocodataset.org/zips/val2014.zip",
52
+ "test-dev": "http://images.cocodataset.org/zips/test2015.zip",
53
+ },
54
+ }
55
+ _SUB_FOLDER_OR_FILE_NAME = {
56
+ "questions": {
57
+ "val": None,
58
+ "test-dev": None,
59
+ },
60
+ "annotations": {
61
+ "val": None,
62
+ },
63
+ "images": {
64
+ "val": "val2014",
65
+ "test-dev": "test2015",
66
+ },
67
+ }
68
+
69
+
70
class VQAv2Dataset(datasets.GeneratorBasedBuilder):
    """Builder for the AdVQA (Human-Adversarial Visual Question Answering) dataset."""

    VERSION = datasets.Version("1.0.0")

    # BUILDER_CONFIGS = [
    #     datasets.BuilderConfig(name="v2", version=VERSION, description="TODO later"),  # coco version in-domain
    #     datasets.BuilderConfig(name="v1", version=VERSION, description="TODO later"),  # AVQA out-of-domain
    # ]

    def _info(self):
        """Return the dataset metadata: feature schema, description, license, citation."""
        features = datasets.Features(
            {
                "answers": [
                    {
                        "answer": datasets.Value("string"),
                        "answer_id": datasets.Value("int64"),
                    }
                ],
                "image_id": datasets.Value("int64"),
                "answer_type": datasets.Value("string"),
                "question_id": datasets.Value("int64"),
                "question": datasets.Value("string"),
                "image": datasets.Image(),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every resource and build one SplitGenerator per split.

        For each split, ``gen_kwargs`` maps ``"<resource>_path"`` to the local
        path of that resource, or ``None`` when the split has no such resource
        (test-dev has no annotations).
        """
        # urls = _URLS[self.config.name] # TODO later
        data_dir = dl_manager.download_and_extract(_URLS)
        gen_kwargs = {}
        for split_name in ["val", "test-dev"]:
            gen_kwargs_per_split = {}
            for dir_name in _URLS:
                if split_name not in data_dir[dir_name]:
                    # Resource missing for this split (e.g. test-dev annotations).
                    path = None
                else:
                    path = Path(data_dir[dir_name][split_name])
                    # Image archives extract into a sub-folder (val2014/test2015);
                    # JSON files are used directly (sub-name is None).
                    sub_name = _SUB_FOLDER_OR_FILE_NAME[dir_name].get(split_name)
                    if sub_name is not None:
                        path = path / sub_name
                gen_kwargs_per_split[f"{dir_name}_path"] = path
            gen_kwargs[split_name] = gen_kwargs_per_split

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=gen_kwargs["val"],
            ),
            datasets.SplitGenerator(
                name="testdev",
                gen_kwargs=gen_kwargs["test-dev"],
            ),
        ]

    def _generate_examples(self, questions_path, annotations_path, images_path):
        """Yield ``(question_id, example)`` pairs for one split.

        Args:
            questions_path: path to the VQA-format questions JSON file.
            annotations_path: path to the annotations JSON file, or ``None``
                for splits without ground-truth answers (test-dev).
            images_path: directory containing the COCO images for this split.
        """
        # Use context managers so the file handles are closed (the original
        # json.load(open(...)) pattern leaked them).
        with open(questions_path, "r", encoding="utf-8") as f:
            questions = json.load(f)

        if annotations_path is not None:
            with open(annotations_path, "r", encoding="utf-8") as f:
                dataset = json.load(f)

            # Index annotations by question id in a single pass
            # (exactly one annotation per question).
            qa = {ann["question_id"]: ann for ann in dataset["annotations"]}

            for question in questions["questions"]:
                annotation = qa[question["question_id"]]
                # Sanity-check the expected VQA JSON schema.
                assert set(question) == {"image_id", "question", "question_id"}
                assert set(annotation) == {"answers", "image_id", "answer_type", "question_id"}
                record = question
                record.update(annotation)
                record["image"] = str(images_path / f"COCO_{images_path.name}_{record['image_id']:0>12}.jpg")
                yield question["question_id"], record
        else:
            # No annotations for the test split: fill answer fields with None
            # so the example still matches the declared feature schema.
            for question in questions["questions"]:
                question.update(
                    {
                        "answers": None,
                        "answer_type": None,
                    }
                )
                question["image"] = str(images_path / f"COCO_{images_path.name}_{question['image_id']:0>12}.jpg")
                yield question["question_id"], question