Dodon committed on
Commit
6876600
·
1 Parent(s): a89ff12

Update PlotQA_dataset.py

Browse files
Files changed (1) hide show
  1. PlotQA_dataset.py +186 -0
PlotQA_dataset.py CHANGED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Builder script
2
+ # coding=utf-8
3
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ # MIT License
19
+
20
+ # Copyright (c) PlotQA.
21
+
22
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
23
+ # of this software and associated documentation files (the "Software"), to deal
24
+ # in the Software without restriction, including without limitation the rights
25
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
26
+ # copies of the Software, and to permit persons to whom the Software is
27
+ # furnished to do so, subject to the following conditions:
28
+
29
+ # The above copyright notice and this permission notice shall be included in all
30
+ # copies or substantial portions of the Software.
31
+
32
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
35
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
37
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38
+ # SOFTWARE
39
+ """PlotQA dataset"""
40
+
41
+ import copy
42
+ import json
43
+ import os
44
+ import pandas as pd
45
+
46
+
47
+ # importing the "tarfile" module
48
+ import tarfile
49
+ import datasets
50
+ from datasets import load_dataset
51
+
52
+ _CITATION = """\
53
+ @inproceedings{methani2020plotqa,
54
+ title={Plotqa: Reasoning over scientific plots},
55
+ author={Methani, Nitesh and Ganguly, Pritha and Khapra, Mitesh M and Kumar, Pratyush},
56
+ booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
57
+ pages={1527--1536},
58
+ year={2020}
59
+ }
60
+ """
61
+ _DESCRIPTION = """\
62
+ PlotQA dataset
63
+ Chart images, tables, image annotations, questions, answers
64
+ """
65
+
66
+ _LICENSE = "CC-BY-4.0 license"
67
+ _SPLITS = ["train", "val", "test"]
68
+
69
+ _URL = "https://huggingface.co/datasets/Dodon/PlotQA_dataset/resolve/main/PlotQA.zip"
70
+
71
+ class ChartQA(datasets.GeneratorBasedBuilder):
72
+
73
+ def _info(self):
74
+ features = datasets.Features(
75
+ {
76
+ "imgname": datasets.Value("string"),
77
+ "image": datasets.Image(),
78
+ "human": datasets.Value("bool"),
79
+ "query": datasets.Value("string"),
80
+ "query_token": datasets.Sequence(datasets.Value("string")),
81
+ "label": datasets.Value("string"),
82
+ "img_ann": datasets.Value("string"),
83
+ "qid": datasets.Value('int64')
84
+ #This format required change
85
+ ## "table_name": datasets.Value("string"),
86
+ ## "table": datasets.Value("string"),
87
+ #"table": datasets.table.Table(),
88
+ }
89
+ )
90
+
91
+ return datasets.DatasetInfo(
92
+ description=_DESCRIPTION,
93
+ features=features,
94
+ supervised_keys=None,
95
+ license=_LICENSE,
96
+ )
97
+
98
+ def _split_generators(self, dl_manager):
99
+ downloaded_file = dl_manager.download_and_extract(_URL) + "/PlotQA"
100
+
101
+ return [
102
+ datasets.SplitGenerator(
103
+ name=datasets.Split.TRAIN,
104
+ gen_kwargs={
105
+ "annotation_v1_path": downloaded_file + "/train/qa_pairs_V1.json",
106
+ "annotation_v2_path": downloaded_file + "/train/qa_pairs_V2.json",
107
+ "images_path": downloaded_file + "/train/png.tar.gz",
108
+ "img_anno_path": downloaded_file + "/train/annotations.json",
109
+ ##"table_path": downloaded_file + "/train/tables",
110
+ },
111
+ ),
112
+ datasets.SplitGenerator(
113
+ name=datasets.Split.VALIDATION,
114
+ gen_kwargs={
115
+ "annotation_v1_path": downloaded_file + "/validation/qa_pairs_V1.json",
116
+ "annotation_v2_path": downloaded_file + "/validation/qa_pairs_V2.json",
117
+ "images_path": downloaded_file + "/validation/png.tar.gz",
118
+ "img_anno_path": downloaded_file + "/validation/annotations.json",
119
+ ##"table_path": downloaded_file + "/train/tables",
120
+ },
121
+ ),
122
+ datasets.SplitGenerator(
123
+ name=datasets.Split.TEST,
124
+ gen_kwargs={
125
+ "annotation_v1_path": downloaded_file + "/test/qa_pairs_V1.json",
126
+ "annotation_v2_path": downloaded_file + "/test/qa_pairs_V2.json",
127
+ "images_path": downloaded_file + "/test/png.tar.gz",
128
+ "img_anno_path": downloaded_file + "/test/annotations.json",
129
+ ##"table_path": downloaded_file + "/train/tables",
130
+ },
131
+ ),
132
+ ]
133
+
134
+ def _generate_examples(self, annotations_path:str, human_path:str, img_anno_path:str ,images_path: str):
135
+ #Load image folder
136
+ # open file
137
+ file = tarfile.open(images_path)
138
+ # extracting file
139
+ file.extractall('./imgs')
140
+ file.close()
141
+ _multi_anno = [annotation_v1_path, annotation_v2_path]
142
+ idx = 0
143
+
144
+ ###
145
+ for anno_path in _multi_anno:
146
+ with open(anno_path, "r", encoding="utf-8") as f:
147
+ data = json.load(f)
148
+ #returns the examples in the raw in json file
149
+ for item in data:
150
+ item = copy.deepcopy(item)
151
+ item["image"] = os.path.join('./imgs',item["image_index"])
152
+ item["query_token"] = []
153
+ item["img_ann"] = item["question_string"]
154
+ item["label"] = item["answer"]
155
+
156
+
157
+
158
+ # item["image"] = os.path.join(images_path,item["imgname"])
159
+ # item["query_token"] = []
160
+ # item["table_name"] = os.path.splitext(item["imgname"])[0]+'.csv'
161
+ # #item["table"] = os.path.join(table_path,item[idx]["table_name"])
162
+ # file_name = os.path.basename(anno_path)
163
+ # #Table load
164
+ # df = pd.read_csv (os.path.join(table_path,item["imgname"].split('.')[0]+'.csv'))
165
+ # item["table"] = df.to_dict()
166
+
167
+ # file = os.path.splitext(file_name)
168
+ # if file == "test_augmented":
169
+ # item["human"] = False
170
+ # else:
171
+ # item["human"] = True
172
+
173
+ # img_anot_file = os.path.splitext(item["imgname"])[0]+'.json'
174
+ # img_anot = os.path.join(img_anno_path, img_anot_file)
175
+ # with open(img_anot) as f:
176
+ # item["img_ann"] = json.load(f)
177
+ """
178
+ item['table'] = os.path.join(images_path,item["imgname"])
179
+ # annotation
180
+ item["img_anno"] = load json file...
181
+ t_path = os.path.join(table_path,item["table_name"])
182
+ table_data = load_dataset("csv", data_files=[t_path])
183
+ yield table_data
184
+ """
185
+ yield idx, item
186
+ idx += 1