Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
9f09587
·
1 Parent(s): 85bccff

Update qag_squad.py

Browse files
Files changed (1) hide show
  1. qag_squad.py +70 -0
qag_squad.py CHANGED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ logger = datasets.logging.get_logger(__name__)
5
+ _VERSION = "1.0.1"
6
+ _NAME = "qag_squad"
7
+ _CITATION = """
8
+ TBA
9
+ """
10
+ _DESCRIPTION = """Question & answer generation dataset based on SQuAD."""
11
+ _URL = f"https://huggingface.co/datasets/lmqg/{_NAME}/resolve/main/data/processed"
12
+ _URLS = {
13
+ 'train': f'{_URL}/train.jsonl',
14
+ 'test': f'{_URL}/test.jsonl',
15
+ 'validation': f'{_URL}/validation.jsonl'
16
+ }
17
+
18
+
19
+ class QAGSQuADConfig(datasets.BuilderConfig):
20
+ """BuilderConfig"""
21
+
22
+ def __init__(self, **kwargs):
23
+ """BuilderConfig.
24
+ Args:
25
+ **kwargs: keyword arguments forwarded to super.
26
+ """
27
+ super(QAGSQuADConfig, self).__init__(**kwargs)
28
+
29
+
30
+ class QAGSQuAD(datasets.GeneratorBasedBuilder):
31
+
32
+ BUILDER_CONFIGS = [
33
+ QAGSQuADConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
34
+ ]
35
+
36
+ def _info(self):
37
+ return datasets.DatasetInfo(
38
+ description=_DESCRIPTION,
39
+ features=datasets.Features(
40
+ {
41
+ "answers": datasets.Sequence(datasets.Value("string")),
42
+ "questions": datasets.Sequence(datasets.Value("string")),
43
+ "paragraph": datasets.Value("string"),
44
+ "paragraph_id": datasets.Value("string"),
45
+ "questions_answers": datasets.Value("string")
46
+ }
47
+ ),
48
+ supervised_keys=None,
49
+ homepage="https://github.com/asahi417/lm-question-generation"
50
+ )
51
+
52
+ def _split_generators(self, dl_manager):
53
+ downloaded_file = dl_manager.download_and_extract(_URLS)
54
+ return [
55
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file["train"]}),
56
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_file["validation"]}),
57
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_file["test"]}),
58
+ ]
59
+
60
+ def _generate_examples(self, filepath):
61
+ _key = 0
62
+ logger.info("generating examples from = %s", filepath)
63
+ with open(filepath, encoding="utf-8") as f:
64
+ _list = f.read().split('\n')
65
+ if _list[-1] == '':
66
+ _list = _list[:-1]
67
+ for i in _list:
68
+ data = json.loads(i)
69
+ yield _key, data
70
+ _key += 1