Create new file
inquisitiveqg.py  ADDED  +163 -0
@@ -0,0 +1,163 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Inquisitive Question Generation for High Level Text Comprehension"""


import itertools

import datasets


_CITATION = """\
@InProceedings{ko2020inquisitive,
  author    = {Ko, Wei-Jen and Chen, Te-Yuan and Huang, Yiyan and Durrett, Greg and Li, Junyi Jessy},
  title     = {Inquisitive Question Generation for High Level Text Comprehension},
  booktitle = {Proceedings of EMNLP},
  year      = {2020},
}
"""

_DESCRIPTION = """\
A dataset of about 20k questions that are elicited from readers as they naturally read through a document sentence by sentence. \
Compared to existing datasets, INQUISITIVE questions target more towards high-level (semantic and discourse) comprehension of text. \
Because these questions are generated while the readers are processing the information, the questions directly communicate gaps between \
the reader’s and writer’s knowledge about the events described in the text, and are not necessarily answered in the document itself. \
This type of question reflects a real-world scenario: if one has questions during reading, some of them are answered by the text later on, \
the rest are not, but any of them would help further the reader’s understanding at the particular point when they asked it. \
This resource could enable question generation models to simulate human-like curiosity and cognitive processing, which may open up a new realm of applications.
"""

_ARTICLES_URL = "https://github.com/wjko2/INQUISITIVE/raw/d0bc26276851fdcd375ec5c91fb2a593a2171a8e/article.zip"
_QUESTIONS_URL = "https://github.com/wjko2/INQUISITIVE/raw/master/questions.txt"

# Article-level splits: dev is articles 1-100 and 1051-1100, test is
# articles 101-150, 501-550 and 1101-1150; train is every other id in 1-1500.
ALL_ARTICLE_IDS = list(range(1, 1501))
DEV_ARTICLE_IDS = list(itertools.chain(range(1, 101), range(1051, 1101)))
TEST_ARTICLE_IDS = list(itertools.chain(range(101, 151), range(501, 551), range(1101, 1151)))
DEV_AND_TEST_IDS = DEV_ARTICLE_IDS + TEST_ARTICLE_IDS
TRAIN_ARTICLE_IDS = [id_ for id_ in ALL_ARTICLE_IDS if id_ not in DEV_AND_TEST_IDS]


class InquisitiveQgConfig(datasets.BuilderConfig):
    """BuilderConfig for INQUISITIVE."""

    def __init__(self, **kwargs):
        """BuilderConfig for INQUISITIVE.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(InquisitiveQgConfig, self).__init__(**kwargs)


+
class InquisitiveQg(datasets.GeneratorBasedBuilder):
|
| 66 |
+
"""Inquisitive Question Generation for High Level Text Comprehension"""
|
| 67 |
+
|
| 68 |
+
VERSION = datasets.Version("1.0.0")
|
| 69 |
+
BUILDER_CONFIGS = [
|
| 70 |
+
InquisitiveQgConfig(name="plain_text", version=datasets.Version("1.0.0", ""), description="plain_text"),
|
| 71 |
+
]
|
| 72 |
+
|
| 73 |
+
def _info(self):
|
| 74 |
+
return datasets.DatasetInfo(
|
| 75 |
+
description=_DESCRIPTION,
|
| 76 |
+
features=datasets.Features(
|
| 77 |
+
{
|
| 78 |
+
"id": datasets.Value("int32"),
|
| 79 |
+
"article_id": datasets.Value("int32"),
|
| 80 |
+
"article": datasets.Value("string"),
|
| 81 |
+
"sentence_id": datasets.Value("int32"),
|
| 82 |
+
"sentence": datasets.Value("string"),
|
| 83 |
+
"span": datasets.Value("string"),
|
| 84 |
+
"question": datasets.Value("string"),
|
| 85 |
+
"span_start_position": datasets.Value("int32"),
|
| 86 |
+
"span_end_position": datasets.Value("int32"),
|
| 87 |
+
}
|
| 88 |
+
),
|
| 89 |
+
supervised_keys=None,
|
| 90 |
+
homepage="https://github.com/wjko2/INQUISITIVE",
|
| 91 |
+
citation=_CITATION,
|
| 92 |
+
)
|
| 93 |
+
|
    def _split_generators(self, dl_manager):
        questions_file = dl_manager.download(_QUESTIONS_URL)
        archive = dl_manager.download(_ARTICLES_URL)
        # Directory name under which the article files live inside the zip.
        articles_dir = "article"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "articles_dir": articles_dir,
                    "questions_file": questions_file,
                    "article_ids": TRAIN_ARTICLE_IDS,
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "articles_dir": articles_dir,
                    "questions_file": questions_file,
                    "article_ids": DEV_ARTICLE_IDS,
                    "files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "articles_dir": articles_dir,
                    "questions_file": questions_file,
                    "article_ids": TEST_ARTICLE_IDS,
                    "files": dl_manager.iter_archive(archive),
                },
            ),
        ]

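    # Layout of questions.txt, inferred from the parsing below rather than
    # from any official spec: a tab-separated file whose first line is a
    # header, followed by one row per question with columns article id,
    # sentence id, sentence text, anchor span, question text, and the span
    # start/end positions.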
    def _generate_examples(self, articles_dir, questions_file, article_ids, files):
        # Read every article in the archive into memory, keyed by its path
        # inside the archive (e.g. "article/0001.txt").
        articles = {}
        for path, f in files:
            articles[path] = f.read().decode("utf-8")
        with open(questions_file, encoding="utf-8") as f:
            questions_counter = 0
            rows = f.readlines()
            for i, row in enumerate(rows):
                if i == 0:
                    continue  # skip header line
                row = row.strip()
                cols = row.split("\t")

                article_id = int(cols[0])
                if article_id not in article_ids:
                    continue

                # Article file names are the zero-padded article ids.
                fname = str(article_id).rjust(4, "0") + ".txt"
                article_path = articles_dir + "/" + fname
                article = articles[article_path]

                id_ = questions_counter
                example = {
                    "article_id": article_id,
                    "sentence_id": int(cols[1]),
                    "sentence": cols[2],
                    "span": cols[3],
                    "question": cols[4],
                    # Cast the offsets so they match the declared int32 features.
                    "span_start_position": int(cols[5]),
                    "span_end_position": int(cols[6]),
                    "id": id_,
                    "article": article,
                }
                yield id_, example
                questions_counter += 1
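For reference, a minimal usage sketch, not part of the PR itself: it assumes the script is saved locally as inquisitiveqg.py and a `datasets` release that still supports loading local dataset scripts.

from datasets import load_dataset

# Build all three splits from the local loading script; "plain_text" is the
# only config this builder defines.
dataset = load_dataset("./inquisitiveqg.py", "plain_text")

print(dataset)                          # DatasetDict with train/validation/test splits
print(dataset["train"][0]["question"])  # first question in the train split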