leviethoang commited on
Commit
e18e4bf
·
1 Parent(s): 97450da

Update VBVLSP.py

Browse files
Files changed (1) hide show
  1. VBVLSP.py +168 -0
VBVLSP.py CHANGED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ VinDataVLSP Dataset"""
16
+
17
+
18
+ import datasets
19
+ from datasets.tasks import AutomaticSpeechRecognition
20
+ import pandas as pd
21
+ import re
22
+
23
+
24
# Direct-download URL of the single archive holding all audio clips
# (metadata below says the snapshot is ~11 GB).
_DATA_URL = "https://dutudn-my.sharepoint.com/:u:/g/personal/122180028_sv1_dut_udn_vn/ESeeV5dFDtVKmnvwJA3jUd4BLLJ7DhpOwsyb8QwpldKHwQ?download=1"

# Per-split transcript files (TSV; read later with columns `file_path` and `script`).
_PROMPTS_URLS = {
    "train": "https://drive.google.com/uc?export=download&id=1eOOvCDz0uOBBRzsHK7NALcGA70-XbQrd",
    "test": "https://drive.google.com/uc?export=download&id=1r2wy5K0VL7wL_iMdtzMhGEy-_k3M2Gdv",
    "validation": "https://drive.google.com/uc?export=download&id=1c0YsA4x1Up9qjDpsj1VKH_86m85cTi79"
}

# Dataset card description (intentionally left empty by the author).
_DESCRIPTION = """\
"""

# Per-language metadata; each key becomes a builder-config name.
_LANGUAGES = {
    "vi": {
        "Language": "Vietnamese",
        "Date": "2021-12-11",
        "Size": "11 GB",
        "Version": "vi_100h_2021-12-11",
    },
}
42
+
43
+
44
class VinDataVLSPConfig(datasets.BuilderConfig):
    """BuilderConfig for the VinDataVLSP dataset."""

    def __init__(self, name, sub_version, **kwargs):
        """
        Args:
            name: `string`, name of this configuration (the language id).
            sub_version: `string`, snapshot/version string of the data.
            **kwargs: forwarded to `datasets.BuilderConfig`; the optional
                metadata keys `language`, `date`, `size`, `val_hrs`,
                `total_hrs` and `num_of_voice` are popped off first so the
                parent constructor never sees them.
        """
        self.sub_version = sub_version
        # Pull optional metadata out of kwargs before delegating upward.
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        self.size = kwargs.pop("size", None)
        self.validated_hr_total = kwargs.pop("val_hrs", None)
        self.total_hr_total = kwargs.pop("total_hrs", None)
        self.num_of_voice = kwargs.pop("num_of_voice", None)
        super().__init__(
            name=name,
            version=datasets.Version("0.1.0", ""),
            description="",
            **kwargs,
        )
67
+
68
+
69
class VinDataVLSP(datasets.GeneratorBasedBuilder):
    """Dataset builder for the VinDataVLSP Vietnamese speech corpus.

    All audio clips live inside one downloaded archive; per-split TSV files
    map each clip's relative path to its transcript.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        VinDataVLSPConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            sub_version=_LANGUAGES[lang_id]["Version"],
        )
        for lang_id in _LANGUAGES
    ]

    def _info(self):
        """Declare the example schema and the ASR task template."""
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                "script": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            task_templates=[
                AutomaticSpeechRecognition(audio_file_path_column="file_path", transcription_column="script")
            ],
        )

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per split, each streaming the archive."""
        tsv_files = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # Prefix that clip paths are expected to share inside the archive.
        # NOTE(review): member paths yielded by iter_archive usually do NOT
        # carry a leading "./" — confirm this prefix matches real member
        # names, otherwise the startswith() filter below drops everything.
        path_to_clips = "./VinDataVLSP"

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "tsv_files": tsv_files[tsv_key],
                    # Fresh iterator per split: each generator makes its own
                    # pass over the archive.
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            )
            for split_name, tsv_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "validation"),
            )
        ]

    def _generate_examples(self, tsv_files, audio_files, path_to_clips):
        """Yield (key, example) pairs joining transcripts with audio bytes.

        Args:
            tsv_files: path to one split's TSV file; must contain the
                columns `file_path` and `script`.
            audio_files: iterable of (path, file-object) pairs as produced
                by `dl_manager.iter_archive`.
            path_to_clips: prefix the clip paths share inside the archive.
        """
        # Compile once instead of re-parsing the pattern on every row.
        chars_to_ignore_regex = re.compile(r'[,?.!\-;:"“%\'�]')

        df = pd.read_csv(tsv_files, sep="\t", header=0)
        df = df.dropna()  # skip rows with missing path or transcript

        # First pass: build transcript examples keyed by archive path.
        examples = {}
        for file_path, script in zip(df["file_path"], df["script"]):
            audio_path = path_to_clips + "/" + file_path
            # Drop any prefix up to the first colon (e.g. a speaker label).
            if ":" in script:
                script = script.split(":", 1)[1]
            script = script.replace("\n", " ")
            script = chars_to_ignore_regex.sub('', script).lower()

            examples[audio_path] = {
                "file_path": audio_path,
                "script": script,
            }

        # Second pass: stream the archive and attach raw audio bytes to the
        # transcripts collected above; unmatched members are skipped.
        for path, f in audio_files:
            if path.startswith(path_to_clips) and path in examples:
                audio = {"path": path, "bytes": f.read()}
                yield path, {**examples[path], "audio": audio}