# NLVR2/NLVR2.py — Hugging Face dataset loading script
# (removed web-viewer chrome accidentally captured from the file page:
#  filename, author caption, commit message/hash, "raw/history/blame", size)
"""NVLR2 loading script."""
import json
import os
import datasets
_CITATION = """\
@article{DBLP:journals/corr/abs-2202-01994,
author = {Yamini Bansal and
Behrooz Ghorbani and
Ankush Garg and
Biao Zhang and
Maxim Krikun and
Colin Cherry and
Behnam Neyshabur and
Orhan Firat},
title = {Data Scaling Laws in {NMT:} The Effect of Noise and Architecture},
journal = {CoRR},
volume = {abs/2202.01994},
year = {2022},
url = {https://arxiv.org/abs/2202.01994},
eprinttype = {arXiv},
eprint = {2202.01994},
timestamp = {Mon, 24 Oct 2022 10:21:23 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2202-01994.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """\
The Natural Language for Visual Reasoning corpora are two language grounding datasets containing natural language sentences grounded in images. The task is to determine whether a sentence is true about a visual input. The data was collected through crowdsourcings, and solving the task requires reasoning about sets of objects, comparisons, and spatial relations. This includes two corpora: NLVR, with synthetically generated images, and NLVR2, which includes natural photographs.
"""
_HOMEPAGE = "https://lil.nlp.cornell.edu/nlvr/"
_LICENSE = "CC BY 4.0" # TODO need to credit both ms coco and vqa authors!
_URL_BASE = "https://github.com/lil-lab/nlvr/tree/master/nlvr2/data"
_SPLITS = {
"train": "train.json",
"validation": "dev.json",
"test1": "test1.json",
"test2": "test2.json",
}
class NLVR2Dataset(datasets.GeneratorBasedBuilder):
    """Builder for NLVR2: a sentence paired with two photographs and a
    True/False label indicating whether the sentence describes the pair."""

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "Default"

    def _info(self):
        """Return dataset metadata and the per-example feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "identifier": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "left_image": datasets.Image(),
                    "right_image": datasets.Image(),
                    "label": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download each split's annotation file and declare the splits.

        URLs are joined with an explicit "/" rather than os.path.join,
        which would produce backslashes on Windows.
        """
        urls = {
            "Default": {
                split: f"{_URL_BASE}/{filename}"
                for split, filename in _SPLITS.items()
            },
        }
        files_path = dl_manager.download_and_extract(urls)
        config_files = files_path[self.config.name]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files_path": config_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files_path": config_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files_path": config_files["test1"]},
            ),
            # NOTE(review): "test2" is downloaded but not exposed — `datasets`
            # has no built-in TEST2 split; exposing it would need a NamedSplit.
            # Kept disabled, as in the original.
        ]

    def _generate_examples(self, files_path):
        """Yield (index, example) pairs from one annotation file.

        Uses a context manager so the file handle is always closed (the
        original leaked it) and pins the encoding to UTF-8.
        """
        # NOTE(review): the upstream NLVR2 annotation files appear to be
        # JSON Lines (one object per line), for which a plain json.load
        # raises — fall back to per-line parsing in that case. TODO confirm
        # against the downloaded files.
        with open(files_path, "r", encoding="utf-8") as f:
            try:
                examples = json.load(f)
            except json.JSONDecodeError:
                f.seek(0)
                examples = [json.loads(line) for line in f if line.strip()]
        for idx, ex in enumerate(examples):
            yield idx, {
                "identifier": ex["identifier"],
                "sentence": ex["sentence"],
                # Image features accept a URL/path string; the loader
                # resolves it to image data.
                "left_image": str(ex["left_url"]),
                "right_image": str(ex["right_url"]),
                "label": ex["label"],
            }