NLVR2 / NLVR2.py
Leyo's picture
switch to tar
bd9f81a
raw
history blame
5.87 kB
"""NVLR2 loading script."""
import json
import os
import datasets
_CITATION = """\
@article{DBLP:journals/corr/abs-2202-01994,
author = {Yamini Bansal and
Behrooz Ghorbani and
Ankush Garg and
Biao Zhang and
Maxim Krikun and
Colin Cherry and
Behnam Neyshabur and
Orhan Firat},
title = {Data Scaling Laws in {NMT:} The Effect of Noise and Architecture},
journal = {CoRR},
volume = {abs/2202.01994},
year = {2022},
url = {https://arxiv.org/abs/2202.01994},
eprinttype = {arXiv},
eprint = {2202.01994},
timestamp = {Mon, 24 Oct 2022 10:21:23 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2202-01994.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """\
The Natural Language for Visual Reasoning corpora are two language grounding datasets containing natural language sentences grounded in images. The task is to determine whether a sentence is true about a visual input. The data was collected through crowdsourcings, and solving the task requires reasoning about sets of objects, comparisons, and spatial relations. This includes two corpora: NLVR, with synthetically generated images, and NLVR2, which includes natural photographs.
"""
_HOMEPAGE = "https://lil.nlp.cornell.edu/nlvr/"
_LICENSE = "CC BY 4.0"
_URL_BASE = "https://raw.githubusercontent.com/lil-lab/nlvr/master/nlvr2/data/"
_SPLITS = {
# "train": "train.json",
"validation": "dev.json",
# "test1": "test1.json",
# "test2": "test2.json",
}
# Root folder on local scratch storage holding the pre-staged NLVR2 image
# tarballs. NOTE(review): assumes the `cnw_ALL_CCFRSCRATCH` environment
# variable is set (Jean Zay project scratch, presumably) — raises KeyError
# at import time otherwise; confirm on the target cluster.
_JZ_IMG_FOLDER_PATH = os.environ["cnw_ALL_CCFRSCRATCH"] + "/local_datasets/nlvr2/"
# Image tarball per split; only the validation archive is enabled for now.
_IMG_SPLITS = {
    # "train": "train_nlvr2.tar.gz",
    "validation": "dev_nlvr2.tar.gz",
    # "test1": "test1_nlvr2.tar.gz",
    # "test2": "test2_nlvr2.tar.gz",
}
class NLVR2Dataset(datasets.GeneratorBasedBuilder):
    """Builder for NLVR2: natural-language statements paired with two photographs.

    Annotation JSON-lines files are downloaded from the NLVR GitHub repository,
    while the image archives are read from local scratch storage (see
    ``_JZ_IMG_FOLDER_PATH``). Only the validation split is currently enabled;
    train/test splits are intentionally disabled in ``_SPLITS``/``_IMG_SPLITS``.
    """

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "Default"

    def _info(self):
        """Return dataset metadata: identifier, sentence, two images, and a label."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "identifier": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "left_image": datasets.Image(),
                    "right_image": datasets.Image(),
                    "label": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download annotation files, extract the local image tarball, declare splits."""
        urls = {
            "Default": {
                "validation": os.path.join(_URL_BASE, _SPLITS["validation"]),
            },
        }
        files_path = dl_manager.download_and_extract(urls)
        # Image archives live on local scratch; only extraction is needed.
        images_files = {
            "validation": os.path.join(_JZ_IMG_FOLDER_PATH, _IMG_SPLITS["validation"]),
        }
        # The tarball unpacks into a "dev_nlvr2" sub-directory.
        validation_img_path = os.path.join(
            dl_manager.extract(images_files["validation"]), "dev_nlvr2"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files_paths": [files_path[self.config.name]["validation"]],
                    "images_path": validation_img_path,
                    # Image filenames are prefixed "dev", not "validation".
                    "split_name": "dev",
                },
            ),
        ]

    def _generate_examples(self, files_paths, images_path, split_name):
        """Yield ``(key, example)`` pairs, skipping pairs whose images are missing.

        Args:
            files_paths: list of JSON-lines annotation files (one JSON object per line).
            images_path: directory containing the extracted ``.png`` images.
            split_name: filename prefix used by the image files (e.g. ``"dev"``).
        """
        idx = 0
        for path in files_paths:
            with open(path, encoding="utf-8") as f:
                for line in f:
                    ex = json.loads(line)
                    # Identifier format: "<split>-<set>-<pair>-<sentence>"; the
                    # two images share the set/pair ids and differ in img0/img1.
                    parts = ex["identifier"].split("-")
                    left_image_path = os.path.join(
                        images_path, f"{split_name}-{parts[1]}-{parts[2]}-img0.png"
                    )
                    right_image_path = os.path.join(
                        images_path, f"{split_name}-{parts[1]}-{parts[2]}-img1.png"
                    )
                    # Some images may be absent locally; such pairs are skipped.
                    if os.path.exists(left_image_path) and os.path.exists(right_image_path):
                        idx += 1
                        yield idx, {
                            "identifier": ex["identifier"],
                            "sentence": ex["sentence"],
                            "left_image": left_image_path,
                            "right_image": right_image_path,
                            "label": ex["label"],
                        }