# PlotQA_dataset / PlotQA_dataset.py
# Dodon — "Update PlotQA_dataset.py" (commit 0d448b3)
# Builder script
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
# Copyright (c) PlotQA.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
"""PlotQA dataset"""
import copy
import json
import os
import pandas as pd
# importing the "tarfile" module
import tarfile
import datasets
from datasets import load_dataset
# BibTeX citation for the PlotQA paper (Methani et al., WACV 2020),
# surfaced on the dataset card.
_CITATION = """\
@inproceedings{methani2020plotqa,
title={Plotqa: Reasoning over scientific plots},
author={Methani, Nitesh and Ganguly, Pritha and Khapra, Mitesh M and Kumar, Pratyush},
booktitle={Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision},
pages={1527--1536},
year={2020}
}
"""
# Human-readable summary passed to DatasetInfo.
_DESCRIPTION = """\
PlotQA dataset
Chart images, tables, image annotations, questions, answers
"""
# License string reported via DatasetInfo.
_LICENSE = "CC-BY-4.0 license"
# Split names; NOTE(review): unused by the visible code, and the archive's
# actual directory for the middle split is "validation", not "val".
_SPLITS = ["train", "val", "test"]
# Single ZIP archive containing all splits, hosted on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/Dodon/PlotQA_dataset/resolve/main/PlotQA.zip"
class PlotQA(datasets.GeneratorBasedBuilder):
    """Builder for the PlotQA chart question-answering dataset.

    Each generated example bundles one chart image's annotation together with
    every question/answer pair (from both qa_pairs_V1 and qa_pairs_V2) that
    references that image.
    """

    def _info(self):
        """Return dataset metadata and the per-example feature schema."""
        features = datasets.Features(
            {
                "imgname": datasets.Value("string"),  # e.g. "123.png"
                # All question strings for this image, aligned with "label".
                "query": datasets.Sequence(datasets.Value("string")),
                # Reserved for tokenized questions; currently always empty.
                "query_token": datasets.Sequence(datasets.Value("string")),
                # Answers aligned index-for-index with "query".
                "label": datasets.Sequence(datasets.Value("string")),
                # Per-image annotation record, JSON-encoded as a string.
                "img_ann": datasets.Value("string"),
                "image_index": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download + extract the archive and declare one generator per split."""
        root = dl_manager.download_and_extract(_URL) + "/PlotQA"
        # Archive directory names per split (the validation dir is spelled out).
        split_dirs = {
            datasets.Split.TRAIN: "train",
            datasets.Split.VALIDATION: "validation",
            datasets.Split.TEST: "test",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "annotation_v1_path": f"{root}/{d}/qa_pairs_V1.json",
                    "annotation_v2_path": f"{root}/{d}/qa_pairs_V2.json",
                    "images_path": f"{root}/{d}/png.tar.gz",
                    "img_anno_path": f"{root}/{d}/annotations.json",
                },
            )
            for split, d in split_dirs.items()
        ]

    def _generate_examples(self, annotation_v1_path: str, annotation_v2_path: str,
                           img_anno_path: str, images_path: str):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            annotation_v1_path: JSON file with the V1 question/answer pairs.
            annotation_v2_path: JSON file with the V2 question/answer pairs.
            img_anno_path: JSON file with one annotation record per image.
            images_path: path to the split's ``png.tar.gz`` archive; kept for
                interface compatibility (images are not emitted at present).

        Each QA record is expected to carry ``image_index``,
        ``question_string`` and ``answer`` keys, matching the commented-out
        ``find_qa`` helper this replaces — TODO confirm against the archive.
        """
        # Merge both annotation versions into a single QA list.
        qa_pairs = []
        for anno_path in (annotation_v1_path, annotation_v2_path):
            with open(anno_path, "r", encoding="utf-8") as f:
                qa_pairs.extend(json.load(f))

        # Group QA pairs by image index once, instead of rescanning the whole
        # QA list for every image (accidental O(images x pairs) otherwise).
        qa_by_image = {}
        for pair in qa_pairs:
            qa_by_image.setdefault(pair["image_index"], []).append(pair)

        with open(img_anno_path, "r", encoding="utf-8") as f:
            img_data = json.load(f)

        for idx, ele in enumerate(img_data):
            image_index = ele["image_index"]
            pairs = qa_by_image.get(image_index, [])
            yield idx, {
                "imgname": str(image_index) + ".png",
                "query": [p["question_string"] for p in pairs],
                "query_token": [],
                # Answers may be numeric in the raw JSON; the schema declares
                # strings, so cast explicitly.
                "label": [str(p["answer"]) for p in pairs],
                # Schema declares a string feature, so JSON-encode the record.
                "img_ann": json.dumps(ele),
                "image_index": str(image_index),
            }