Zaid committed on
Commit
5a3b1df
·
1 Parent(s): 0963a26

Create coqa_expanded.py

Browse files
Files changed (1) hide show
  1. coqa_expanded.py +98 -0
coqa_expanded.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""TODO(coqa): Add a description here."""


import json

import datasets


# Citation for the CoQA paper (Reddy, Chen & Manning, arXiv 2018).
# NOTE: the title previously said "WikiQA..." — a copy-paste error from a
# different dataset's script; corrected to the actual CoQA paper title.
_CITATION = """\
@InProceedings{SivaAndAl:Coca,
       author = {Siva, Reddy and Danqi, Chen and Christopher D., Manning},
        title = {CoQA: A Conversational Question Answering Challenge},
      journal = { arXiv},
         year = {2018},

}
"""

# Short description shown on the dataset page.
_DESCRIPTION = """\
CoQA: A Conversational Question Answering Challenge
"""

# Official Stanford NLP download locations for the CoQA v1.0 JSON splits.
_TRAIN_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json"
_DEV_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json"
class Coqa(datasets.GeneratorBasedBuilder):
    """TODO(coqa): Short description of my dataset."""

    # TODO(coqa): Set up version.
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # TODO(coqa): Specifies the datasets.DatasetInfo object.
        # Nested feature spec for a single answer span.
        answer_features = {
            "input_text": datasets.Value("string"),
            "answer_start": datasets.Value("int32"),
            "answer_end": datasets.Value("int32"),
        }
        return datasets.DatasetInfo(
            # This description appears on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "source": datasets.Value("string"),
                    "story": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": answer_features,
                }
            ),
            # No canonical (input, target) pair, so as_supervised is unsupported.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://stanfordnlp.github.io/coqa/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and validation splits."""
        # dl_manager downloads (and would extract) the two JSON files.
        downloaded_files = dl_manager.download_and_extract(
            {"train": _TRAIN_DATA_URL, "dev": _DEV_DATA_URL}
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (key, example) tuples from one CoQA JSON file.

        Each (story, question, answer) triple becomes one flat example,
        keyed by a running integer id across the whole split.
        """
        example_id = 0
        with open(filepath, encoding="utf-8") as handle:
            data = json.load(handle)
        for row in data["data"]:
            story = row["story"]
            source = row["source"]
            # Questions and answers are parallel lists; pair them by index.
            for idx, answer in enumerate(row["answers"]):
                yield example_id, {
                    "source": source,
                    "story": story,
                    "question": row["questions"][idx]["input_text"],
                    "answer": {
                        "input_text": answer["input_text"],
                        "answer_start": answer["span_start"],
                        "answer_end": answer["span_end"],
                    },
                }
                example_id += 1