Commit
·
5457a22
1
Parent(s):
b95d181
add vizwiz
Browse files
VizWiz.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
import datasets
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# BibTeX citation for the VizWiz Grand Challenge paper (Gurari et al., 2018).
_CITATION = """
@misc{gurari2018vizwiz,
    title={VizWiz Grand Challenge: Answering Visual Questions from Blind People},
    author={Danna Gurari and Qing Li and Abigale J. Stangl and Anhong Guo and Chi Lin and Kristen Grauman and Jiebo Luo and Jeffrey P. Bigham},
    year={2018},
    eprint={1802.08218},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
"""

_HOMEPAGE = "https://vizwiz.org/tasks-and-datasets/vqa/"

# Dataset card description. The original text announced "two tasks" but only
# labelled "(2)" — the missing "(1)" is restored here.
_DESCRIPTION = """
The VizWiz-VQA dataset originates from a natural visual question answering setting where blind people
each took an image and recorded a spoken question about it, together with 10 crowdsourced answers per
visual question. The proposed challenge addresses the following two tasks for this dataset: (1) predict the
answer to a visual question and (2) predict whether a visual question cannot be answered.
"""

# Stray leading space removed from the license string.
_LICENSE = "Creative Commons Attribution 4.0 International License."

# Per-split image archives; keys double as the sub-folder name inside each zip.
_DATA_URL = {
    "train": "https://vizwiz.cs.colorado.edu/VizWiz_final/images/train.zip",
    "test": "https://vizwiz.cs.colorado.edu/VizWiz_final/images/test.zip",
    "val": "https://vizwiz.cs.colorado.edu/VizWiz_final/images/val.zip",
}

# Single archive containing train.json / val.json / test.json annotation files.
_ANNOTATION_URL = "https://vizwiz.cs.colorado.edu/VizWiz_final/vqa_data/Annotations.zip"

# Schema of one example. Answer-related fields are None for the test split,
# whose annotations carry only image + question.
_FEATURES = datasets.Features(
    {
        "id": datasets.Value("int32"),
        "image": datasets.Image(),
        "filename": datasets.Value("string"),
        "question": datasets.Value("string"),
        "answers": datasets.Sequence(datasets.Value("string")),
        "answers_original": [
            {
                "answer": datasets.Value("string"),
                "answer_confidence": datasets.Value("string"),
            }
        ],
        "answer_type": datasets.Value("string"),
        "answerable": datasets.Value("int32"),
    }
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class VizWiz(datasets.GeneratorBasedBuilder):
    """Builder for the VizWiz-VQA dataset: visual questions asked by blind
    people, each with 10 crowdsourced answers (train/val) or no answers (test).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata (features, citation, license, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download images and annotations and declare the three splits.

        Fix: the original called ``download_and_extract(_ANNOTATION_URL)``
        three times (once per split); the archive is downloaded/extracted once
        here and the per-split JSON paths are derived from it.
        """
        ann_dir = Path(dl_manager.download_and_extract(_ANNOTATION_URL))
        # One extracted folder per split key ("train" / "val" / "test").
        image_folders = {k: Path(v) for k, v in dl_manager.download_and_extract(_DATA_URL).items()}

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "annotation_file": str(ann_dir / f"{split_key}.json"),
                    "image_folders": image_folders,
                    "split_key": split_key,
                },
            )
            for split_name, split_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "val"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, annotation_file, image_folders, split_key):
        """Yield ``(key, example)`` pairs for one split.

        Fix: the original used ``json.load(open(annotation_file))``, leaking
        the file handle; a ``with`` block closes it deterministically.
        """
        with open(annotation_file, encoding="utf-8") as f:
            annotations = json.load(f)
        for counter, ann in enumerate(annotations):
            if split_key in ("train", "val"):
                answers = [answer["answer"] for answer in ann["answers"]]
                answers_original = ann["answers"]
                answer_type = ann["answer_type"]
                answerable = ann["answerable"]
            else:
                # Test-split annotations carry only image + question; the
                # answer fields are left as None per the declared schema.
                answers = None
                answers_original = None
                answer_type = None
                answerable = None

            yield counter, {
                "id": counter,
                # Image archives extract to <folder>/<split_key>/<filename>.
                "image": str(image_folders[split_key] / split_key / ann["image"]),
                "filename": ann["image"],
                "question": ann["question"],
                "answers": answers,
                "answers_original": answers_original,
                "answer_type": answer_type,
                "answerable": answerable,
            }
|