| """NVLR2 loading script.""" |
|
|
|
|
| import json |
| import os |
| import datasets |
|
|
|
|
| _CITATION = """\ |
| @article{DBLP:journals/corr/abs-2202-01994, |
| author = {Yamini Bansal and |
| Behrooz Ghorbani and |
| Ankush Garg and |
| Biao Zhang and |
| Maxim Krikun and |
| Colin Cherry and |
| Behnam Neyshabur and |
| Orhan Firat}, |
| title = {Data Scaling Laws in {NMT:} The Effect of Noise and Architecture}, |
| journal = {CoRR}, |
| volume = {abs/2202.01994}, |
| year = {2022}, |
| url = {https://arxiv.org/abs/2202.01994}, |
| eprinttype = {arXiv}, |
| eprint = {2202.01994}, |
| timestamp = {Mon, 24 Oct 2022 10:21:23 +0200}, |
| biburl = {https://dblp.org/rec/journals/corr/abs-2202-01994.bib}, |
| bibsource = {dblp computer science bibliography, https://dblp.org} |
| } |
| """ |
|
|
| _DESCRIPTION = """\ |
| The Natural Language for Visual Reasoning corpora are two language grounding datasets containing natural language sentences grounded in images. The task is to determine whether a sentence is true about a visual input. The data was collected through crowdsourcings, and solving the task requires reasoning about sets of objects, comparisons, and spatial relations. This includes two corpora: NLVR, with synthetically generated images, and NLVR2, which includes natural photographs. |
| """ |
|
|
| _HOMEPAGE = "https://lil.nlp.cornell.edu/nlvr/" |
|
|
| _LICENSE = "CC BY 4.0" |
|
|
| _URL_BASE = "https://raw.githubusercontent.com/lil-lab/nlvr/master/nlvr2/data/" |
| _SPLITS = { |
| |
| "validation": "dev.json", |
| |
| |
| } |
| _JZ_IMG_FOLDER_PATH = f"{os.environ['cnw_ALL_CCFRSCRATCH']}/local_datasets/nlvr2/" |
| _IMG_SPLITS = { |
| |
| "validation": "dev_nlvr2.tar.gz", |
| |
| |
| } |
|
|
|
|
class NLVR2Dataset(datasets.GeneratorBasedBuilder):
    """Builder for the NLVR2 validation split.

    Sentence annotations are downloaded from the public ``lil-lab/nlvr``
    GitHub repository; the paired photographs are read from a pre-downloaded
    tarball under ``_JZ_IMG_FOLDER_PATH`` (local cluster storage).
    """

    VERSION = datasets.Version("1.0.0")

    DEFAULT_CONFIG_NAME = "Default"

    def _info(self):
        """Declare the schema: an identifier, a sentence, two images, a label."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "identifier": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "left_image": datasets.Image(),
                    "right_image": datasets.Image(),
                    "label": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the dev annotation file and extract the local image tarball.

        Returns a single VALIDATION split. The image archive is expected to
        already exist on disk (it is only extracted, never downloaded).
        """
        urls = {
            "Default": {
                "validation": os.path.join(_URL_BASE, _SPLITS["validation"]),
            },
        }
        files_path = dl_manager.download_and_extract(urls)

        images_files = {
            "validation": os.path.join(_JZ_IMG_FOLDER_PATH, _IMG_SPLITS["validation"]),
        }
        # The tarball unpacks into a "dev_nlvr2" directory holding the PNGs.
        validation_img_path = os.path.join(
            dl_manager.extract(images_files["validation"]), "dev_nlvr2"
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files_paths": [files_path[self.config.name]["validation"]],
                    "images_path": validation_img_path,
                    # "dev" is the prefix used in the image file names.
                    "split_name": "dev",
                },
            ),
        ]

    def _generate_examples(self, files_paths, images_path, split_name):
        """Yield ``(key, example)`` pairs from JSON-lines annotation files.

        Examples whose image pair is missing on disk are silently skipped,
        so keys stay consecutive over the examples actually emitted.
        """
        idx = 0
        for path in files_paths:
            # Fix: context manager (the original leaked the file handle via
            # `open(path).readlines()`) and explicit encoding; stream lines
            # instead of materializing the whole file.
            with open(path, encoding="utf-8") as annotations:
                for line in annotations:
                    line = line.strip()
                    if not line:  # tolerate blank lines in the JSONL file
                        continue
                    ex = json.loads(line)
                    # Identifiers look like "<split>-<set>-<pair>-<sentence>";
                    # both images share the middle "<set>-<pair>" part.
                    parts = ex["identifier"].split("-")
                    left_image_path = os.path.join(
                        images_path, f"{split_name}-{parts[1]}-{parts[2]}-img0.png"
                    )
                    right_image_path = os.path.join(
                        images_path, f"{split_name}-{parts[1]}-{parts[2]}-img1.png"
                    )
                    # Skip examples whose images are absent from the tarball.
                    if os.path.exists(left_image_path) and os.path.exists(right_image_path):
                        idx += 1
                        yield idx, {
                            "identifier": ex["identifier"],
                            "sentence": ex["sentence"],
                            "left_image": left_image_path,
                            "right_image": right_image_path,
                            "label": ex["label"],
                        }
|
|