Datasets:

Languages:
Vietnamese
ArXiv:
License:
holylovenia committed on
Commit
b4896a7
·
verified ·
1 Parent(s): 36205fa

Upload vlogqa.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vlogqa.py +208 -0
vlogqa.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from pathlib import Path
18
+ from typing import Dict, List, Tuple
19
+
20
+ import datasets
21
+
22
+ from seacrowd.utils.configs import SEACrowdConfig
23
+ from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
24
+ Licenses, Tasks)
25
+
26
# BibTeX entry for the VlogQA paper (Ngo et al., EACL 2024).
_CITATION = """\
@inproceedings{ngo-etal-2024-vlogqa,
    title = "{V}log{QA}: Task, Dataset, and Baseline Models for {V}ietnamese Spoken-Based Machine Reading Comprehension",
    author = "Ngo, Thinh and
      Dang, Khoa and
      Luu, Son and
      Nguyen, Kiet and
      Nguyen, Ngan",
    editor = "Graham, Yvette and
      Purver, Matthew",
    booktitle = "Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = mar,
    year = "2024",
    address = "St. Julian{'}s, Malta",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.eacl-long.79",
    pages = "1310--1324",
}
"""

# Canonical dataset identifier; also used to build config names and subset ids.
_DATASETNAME = "vlogqa"

_DESCRIPTION = """\
VlogQA is a Vietnamese spoken language corpus for machine reading comprehension. It
consists of 10,076 question-answer pairs based on 1,230 transcript documents sourced from
YouTube videos around food and travel.
"""

_HOMEPAGE = "https://github.com/sonlam1102/vlogqa"

# ISO 639-3 language code for Vietnamese.
_LANGUAGES = ["vie"]

# Non-standard license: usage terms set by the NLP@UIT research group.
_LICENSE = f"""{Licenses.OTHERS.value} |
The user of VlogQA developed by the NLP@UIT research group must respect the following
terms and conditions:
1. The dataset is only used for non-profit research for natural language processing and
education.
2. The dataset is not allowed to be used in commercial systems.
3. Do not redistribute the dataset. This dataset may be modified or improved to serve a
research purpose better, but the edited dataset may not be distributed.
4. Summaries, analyses, and interpretations of the properties of the dataset may be
derived and published, provided it is not possible to reconstruct the information from
these summaries.
5. Published research works that use the dataset must cite the following paper:
Thinh Ngo, Khoa Dang, Son Luu, Kiet Nguyen, and Ngan Nguyen. 2024. VlogQA: Task,
Dataset, and Baseline Models for Vietnamese Spoken-Based Machine Reading Comprehension.
In Proceedings of the 18th Conference of the European Chapter of the Association for
Computational Linguistics (Volume 1: Long Papers), pages 1310–1324, St. Julian’s,
Malta. Association for Computational Linguistics.
"""

_LOCAL = True  # need to sign a user agreement first, see _HOMEPAGE

_URLS = {}  # local dataset: nothing to download

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
# Derived config suffix for the first (only) supported task, e.g. "seacrowd_qa".
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # qa

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
87
+
88
+
89
class VlogQADataset(datasets.GeneratorBasedBuilder):
    """Vietnamese spoken language corpus around food and travel for machine reading comprehension"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Two configs: the raw SQuAD-style layout and the shared SEACrowd QA schema.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{_SEACROWD_SCHEMA}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=_SEACROWD_SCHEMA,
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo matching the active config's schema."""
        if self.config.schema == "source":
            # SQuAD-like layout: each question carries a list of
            # (text, answer_start) answer spans.
            answer_spec = {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(answer_spec),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # qa_features
            # Preserve the character offsets of each answer span in the metadata.
            features["meta"] = {
                "answers_start": datasets.Sequence(datasets.Value("int32")),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        The corpus is distributed under a user agreement (see _HOMEPAGE), so the
        JSON files must already be on disk and ``data_dir`` must point at them.
        """
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the `data_dir` kwarg (where the .json is located) to load_dataset.")
        data_dir = Path(self.config.data_dir)

        split_to_file = [
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.VALIDATION, "dev.json"),
            (datasets.Split.TEST, "test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"file_path": data_dir / file_name},
            )
            for split_name, file_name in split_to_file
        ]

    def _generate_examples(self, file_path: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        with open(file_path, "r", encoding="utf-8") as file:
            data = json.load(file)

        # Resolve the schema branch once instead of per example.
        as_source = self.config.schema == "source"
        as_seacrowd = self.config.schema == _SEACROWD_SCHEMA

        key = 0
        for article in data["data"]:
            for paragraph in article["paragraphs"]:
                for qa in paragraph["qas"]:
                    if as_source:
                        yield key, {
                            "id": qa["id"],
                            "title": article["title"],
                            "context": paragraph["context"],
                            "question": qa["question"],
                            "answers": qa["answers"],
                        }
                        key += 1
                    elif as_seacrowd:
                        yield key, {
                            "id": str(key),
                            "question_id": qa["id"],
                            "document_id": article["title"],
                            "question": qa["question"],
                            "type": None,
                            "choices": [],  # escape multiple_choice qa seacrowd test, can't be None
                            "context": paragraph["context"],
                            "answer": [answer["text"] for answer in qa["answers"]],
                            "meta": {
                                "answers_start": [answer["answer_start"] for answer in qa["answers"]],
                            },
                        }
                        key += 1