Datasets:

Modalities:
Text
Sub-tasks:
extractive-qa
Languages:
English
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
8f1e728
·
1 Parent(s): 9396a0e

Create qa_squad.py

Browse files
Files changed (1) hide show
  1. qa_squad.py +107 -0
qa_squad.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ from datasets.tasks import QuestionAnsweringExtractive
4
+
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+ _VERSION = "0.0.0"
8
+ _NAME = "qa_squad"
9
+ _DESCRIPTION = """SQuAD with the train/validation/test split used in SQuAD QG"""
10
+ _CITATION = """
11
+ @inproceedings{du-cardie-2018-harvesting,
12
+ title = "Harvesting Paragraph-level Question-Answer Pairs from {W}ikipedia",
13
+ author = "Du, Xinya and
14
+ Cardie, Claire",
15
+ booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
16
+ month = jul,
17
+ year = "2018",
18
+ address = "Melbourne, Australia",
19
+ publisher = "Association for Computational Linguistics",
20
+ url = "https://aclanthology.org/P18-1177",
21
+ doi = "10.18653/v1/P18-1177",
22
+ pages = "1907--1917",
23
+ abstract = "We study the task of generating from Wikipedia articles question-answer pairs that cover content beyond a single sentence. We propose a neural network approach that incorporates coreference knowledge via a novel gating mechanism. As compared to models that only take into account sentence-level information (Heilman and Smith, 2010; Du et al., 2017; Zhou et al., 2017), we find that the linguistic knowledge introduced by the coreference representation aids question generation significantly, producing models that outperform the current state-of-the-art. We apply our system (composed of an answer span extraction system and the passage-level QG system) to the 10,000 top ranking Wikipedia articles and create a corpus of over one million question-answer pairs. We provide qualitative analysis for the this large-scale generated corpus from Wikipedia.",
24
+ }
25
+ """
26
+ _BASE_URL = "https://huggingface.co/datasets/lmqg/qa_squad/resolve/main/dataset"
27
+ _URLS = {
28
+ str(datasets.Split.TRAIN): f'{_BASE_URL}/train.json',
29
+ str(datasets.Split.VALIDATION): f'{_BASE_URL}/dev.json',
30
+ str(datasets.Split.TEST): f'{_BASE_URL}/test.json'
31
+ }
32
+
33
+
34
+ class QASquadConfig(datasets.BuilderConfig):
35
+ """BuilderConfig"""
36
+
37
+ def __init__(self, **kwargs):
38
+ """BuilderConfig
39
+ Args:
40
+ **kwargs: keyword arguments forwarded to super.
41
+ """
42
+ super(QASquadConfig, self).__init__(**kwargs)
43
+
44
+
45
+ class QASquad(datasets.GeneratorBasedBuilder):
46
+
47
+ BUILDER_CONFIGS = [
48
+ QASquadConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
49
+ ]
50
+
51
+ def _info(self):
52
+ return datasets.DatasetInfo(
53
+ description=_DESCRIPTION,
54
+ features=datasets.Features(
55
+ {
56
+ "id": datasets.Value("string"),
57
+ "title": datasets.Value("string"),
58
+ "context": datasets.Value("string"),
59
+ "question": datasets.Value("string"),
60
+ "answers": datasets.features.Sequence(
61
+ {
62
+ "text": datasets.Value("string"),
63
+ "answer_start": datasets.Value("int32"),
64
+ }
65
+ ),
66
+ }
67
+ ),
68
+ supervised_keys=None,
69
+ homepage="https://github.com/asahi417/lm-question-generation",
70
+ task_templates=[
71
+ QuestionAnsweringExtractive(
72
+ question_column="question", context_column="context", answers_column="answers"
73
+ )
74
+ ],
75
+ )
76
+
77
+ def _split_generators(self, dl_manager):
78
+ downloaded_file = dl_manager.download_and_extract(_URLS)
79
+ return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_file[str(i)]})
80
+ for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
81
+
82
+ def _generate_examples(self, filepath):
83
+ """This function returns the examples in the raw (text) form."""
84
+ logger.info("generating examples from = %s", filepath)
85
+ key = 0
86
+ with open(filepath, encoding="utf-8") as f:
87
+ squad = json.load(f)
88
+ for article in squad["data"]:
89
+ title = article.get("title", "")
90
+ for paragraph in article["paragraphs"]:
91
+ context = paragraph["context"] # do not strip leading blank spaces GH-2585
92
+ for qa in paragraph["qas"]:
93
+ answer_starts = [answer["answer_start"] for answer in qa["answers"]]
94
+ answers = [answer["text"] for answer in qa["answers"]]
95
+ # Features currently used are "context", "question", and "answers".
96
+ # Others are extracted here for the ease of future expansions.
97
+ yield key, {
98
+ "title": title,
99
+ "context": context,
100
+ "question": qa["question"],
101
+ "id": qa["id"],
102
+ "answers": {
103
+ "answer_start": answer_starts,
104
+ "text": answers,
105
+ },
106
+ }
107
+ key += 1