DrakuTheDragon committed on
Commit
6fa418d
·
1 Parent(s): cbce910

Create Test.py

Browse files
Files changed (1) hide show
  1. Test.py +105 -0
Test.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ from datasets.tasks import QuestionAnsweringExtractive
4
+
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+ _VERSION = "1.0.0"
8
+ _NAME = "qa_harvesting_from_wikipedia"
9
+ _DESCRIPTION = """QA pairs generated in https://aclanthology.org/P18-1177/"""
10
+ _CITATION = """
11
+ @inproceedings{du-cardie-2018-harvesting,
12
+ title = "Harvesting Paragraph-level Question-Answer Pairs from {W}ikipedia",
13
+ author = "Du, Xinya and
14
+ Cardie, Claire",
15
+ booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
16
+ month = jul,
17
+ year = "2018",
18
+ address = "Melbourne, Australia",
19
+ publisher = "Association for Computational Linguistics",
20
+ url = "https://aclanthology.org/P18-1177",
21
+ doi = "10.18653/v1/P18-1177",
22
+ pages = "1907--1917",
23
+ abstract = "We study the task of generating from Wikipedia articles question-answer pairs that cover content beyond a single sentence. We propose a neural network approach that incorporates coreference knowledge via a novel gating mechanism. As compared to models that only take into account sentence-level information (Heilman and Smith, 2010; Du et al., 2017; Zhou et al., 2017), we find that the linguistic knowledge introduced by the coreference representation aids question generation significantly, producing models that outperform the current state-of-the-art. We apply our system (composed of an answer span extraction system and the passage-level QG system) to the 10,000 top ranking Wikipedia articles and create a corpus of over one million question-answer pairs. We provide qualitative analysis for the this large-scale generated corpus from Wikipedia.",
24
+ }
25
+ """
26
+ _BASE_URL = "https://huggingface.co/datasets/lmqg/qa_harvesting_from_wikipedia/resolve/main/dataset"
27
+ _URLS = {
28
+ str(datasets.Split.TEST): f'{_BASE_URL}/test.json'
29
+ }
30
+
31
+
32
+ class QAHarvestingFromWikipediaConfig(datasets.BuilderConfig):
33
+ """BuilderConfig"""
34
+
35
+ def __init__(self, **kwargs):
36
+ """BuilderConfig
37
+ Args:
38
+ **kwargs: keyword arguments forwarded to super.
39
+ """
40
+ super(QAHarvestingFromWikipediaConfig, self).__init__(**kwargs)
41
+
42
+
43
class QAHarvestingFromWikipedia(datasets.GeneratorBasedBuilder):
    """Loader for SQuAD-format QA pairs harvested from Wikipedia (Du & Cardie, 2018)."""

    BUILDER_CONFIGS = [
        QAHarvestingFromWikipediaConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _info(self):
        """Return dataset metadata: features, citation, homepage and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # FIX: _CITATION was defined at module level but never attached
            # to the DatasetInfo; expose it so `dataset.info.citation` works.
            citation=_CITATION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # SQuAD-style answers: parallel lists of answer texts and
                    # their character offsets into `context`.
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/asahi417/lm-question-generation",
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the configured split files and build one generator per split.

        BUG FIX: the original iterated over TRAIN, VALIDATION and TEST, but
        ``_URLS`` only declares a TEST entry, so ``downloaded_file[str(i)]``
        raised ``KeyError`` for the two undeclared splits.  Iterating over
        whatever ``_URLS`` actually defines keeps this robust if splits are
        added or removed.
        """
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=split_name, gen_kwargs={"filepath": path})
            for split_name, path in downloaded_file.items()
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a SQuAD-format JSON file.

        Args:
            filepath: path to a JSON file with the SQuAD layout
                ``{"data": [{"title", "paragraphs": [{"context", "qas": [...]}]}]}``.

        Yields:
            Tuples of a monotonically increasing integer key and a dict
            matching the features declared in ``_info``.
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            for article in squad["data"]:
                title = article.get("title", "")
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
                    for qa in paragraph["qas"]:
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"] for answer in qa["answers"]]
                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield key, {
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "id": qa["id"],
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
                        key += 1