mcemilg commited on
Commit
2e950c2
·
1 Parent(s): 94fc7d7

Upload tquad.py

Browse files
Files changed (1) hide show
  1. tquad.py +108 -0
tquad.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Turkish Question Answering Dataset"""
2
+
3
+
4
+ import json
5
+
6
+ import datasets
7
+ from datasets.tasks import QuestionAnsweringExtractive
8
+
9
+
10
# Module-level logger routed through the `datasets` logging facade.
logger = datasets.logging.get_logger(__name__)

# Citation and description are left empty in this version of the loader.
_CITATION = """"""

_DESCRIPTION = """"""

# Base URL of the raw dataset files on GitHub; the train/dev JSON files
# (SQuAD-style v0.1 format) live directly under it.
_URL = "https://raw.githubusercontent.com/TQuad/turkish-nlp-qa-dataset/master/"
_URLS = {
    "train": _URL + "train-v0.1.json",
    "dev": _URL + "dev-v0.1.json",
}
21
+
22
+
23
class TquadConfig(datasets.BuilderConfig):
    """BuilderConfig for TQuad."""

    def __init__(self, **kwargs):
        """Create a TQuad builder configuration.

        Args:
            **kwargs: keyword arguments forwarded unchanged to the parent
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
33
+
34
+
35
class Tquad(datasets.GeneratorBasedBuilder):
    """TQuad: Turkish Question Answering Dataset"""

    BUILDER_CONFIGS = [
        TquadConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation,
        and the extractive-QA task template."""
        answers_feature = datasets.features.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        )
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": answers_feature,
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://github.com/TQuad/turkish-nlp-qa-dataset",
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the raw JSON files and map them onto train/validation splits."""
        paths = dl_manager.download_and_extract(_URLS)
        split_to_path = {
            datasets.Split.TRAIN: paths["train"],
            datasets.Split.VALIDATION: paths["dev"],
        }
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": path})
            for split, path in split_to_path.items()
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from a SQuAD-style JSON file."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as handle:
            payload = json.load(handle)
        counter = 0
        for entry in payload["data"]:
            entry_title = entry.get("title", "")
            for para in entry["paragraphs"]:
                # Keep the context verbatim; do not strip leading blank spaces GH-2585
                passage = para["context"]
                for qa in para["qas"]:
                    starts = [ans["answer_start"] for ans in qa["answers"]]
                    texts = [ans["text"] for ans in qa["answers"]]
                    # Features currently used are "context", "question", and "answers".
                    # Others are extracted here for the ease of future expansions.
                    yield counter, {
                        "title": entry_title,
                        "context": passage,
                        "question": qa["question"],
                        "id": qa["id"],
                        "answers": {
                            "answer_start": starts,
                            "text": texts,
                        },
                    }
                    counter += 1