mnansary committed on
Commit
b86c335
·
1 Parent(s): 4d5f75c
Files changed (4) hide show
  1. cvbn.py +279 -0
  2. data/README.md +0 -3
  3. languages.py +1 -0
  4. release_stats.py +33 -0
cvbn.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Common Voice Dataset"""
16
+
17
+
18
+ import csv
19
+ import os
20
+ import urllib
21
+ import shutil
22
+ import datasets
23
+ import requests
24
+ from datasets.utils.py_utils import size_str
25
+ from huggingface_hub import HfApi, HfFolder
26
+
27
+ from .languages import LANGUAGES
28
+ from .release_stats import STATS
29
+
30
+ _CITATION = """\
31
+ @inproceedings{commonvoice:2020,
32
+ author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
33
+ title = {Common Voice: A Massively-Multilingual Speech Corpus},
34
+ booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
35
+ pages = {4211--4215},
36
+ year = 2020
37
+ }
38
+ """
39
+
40
+ _HOMEPAGE = "https://commonvoice.mozilla.org/bn/datasets"
41
+
42
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
43
+
44
+ _API_URL = "https://commonvoice.mozilla.org/api/v1"
45
+
46
+
47
class CommonVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig carrying per-language Common Voice release statistics."""

    # Metadata keys pulled out of **kwargs before delegating to the base class.
    _STAT_FIELDS = (
        "language",
        "release_date",
        "num_clips",
        "num_speakers",
        "validated_hr",
        "total_hr",
        "size_bytes",
    )

    def __init__(self, name, version, **kwargs):
        # Extract each per-language statistic (defaulting to None) so that only
        # options recognised by datasets.BuilderConfig are forwarded below.
        for field in self._STAT_FIELDS:
            setattr(self, field, kwargs.pop(field, None))
        # Human-readable archive size derived from the raw byte count.
        self.size_human = size_str(self.size_bytes)
        description = (
            f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
            f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
            f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
71
+
72
+
73
class CommonVoice(datasets.GeneratorBasedBuilder):
    """Dataset loading script for Mozilla Common Voice audio + transcripts.

    This fork targets the Bengali ("bn") locale. Archives are fetched from the
    Common Voice API; the train split's metadata TSV is read from a local file
    rather than from the downloaded archive (see _split_generators /
    _generate_examples).
    """

    # Config built when the caller does not name one explicitly.
    DEFAULT_CONFIG_NAME = "bn"
    # Flush generated examples to disk in batches of this size.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    # One CommonVoiceConfig per locale present in release_stats.STATS
    # (only "bn" in this repository's release_stats.py).
    BUILDER_CONFIGS = [
        CommonVoiceConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            num_clips=lang_stats["clips"],
            num_speakers=lang_stats["users"],
            validated_hr=float(lang_stats["validHrs"]),
            total_hr=float(lang_stats["totalHrs"]),
            size_bytes=int(lang_stats["size"]),
        )
        for lang, lang_stats in STATS["locales"].items()
    ]

    def _info(self):
        """Return the DatasetInfo: description, feature schema, license, citation."""
        total_languages = len(STATS["locales"])
        total_valid_hours = STATS["totalValidHrs"]
        description = (
            "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
            f"The dataset currently consists of {total_valid_hours} validated hours of speech "
            f" in {total_languages} languages, but more voices and languages are always added."
        )
        # Schema of every yielded example; audio is decoded at 48 kHz.
        features = datasets.Features(
            {
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
                "up_votes": datasets.Value("int64"),
                "down_votes": datasets.Value("int64"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accent": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "segment": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
            # task_templates=[
            #     AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="sentence")
            # ],
        )

    def _get_bundle_url(self, locale, url_template):
        """Resolve the download URL for a locale's archive via the Common Voice API."""
        # path = encodeURIComponent(path)
        path = url_template.replace("{locale}", locale)
        # Percent-encode the bundle path so it can be embedded in the API URL.
        # NOTE(review): only `import urllib` appears at the top of the file;
        # `urllib.parse` resolves here only because another import loads the
        # submodule — consider importing urllib.parse explicitly.
        path = urllib.parse.quote(path.encode("utf-8"), safe="~()*!.'")
        # use_cdn = self.config.size_bytes < 20 * 1024 * 1024 * 1024
        # response = requests.get(f"{_API_URL}/bucket/dataset/{path}/{use_cdn}", timeout=10.0).json()
        response = requests.get(f"{_API_URL}/bucket/dataset/{path}", timeout=10.0).json()
        return response["url"]

    def _log_download(self, locale, bundle_version, auth_token):
        """Report the downloader's e-mail and locale to the Common Voice API."""
        if isinstance(auth_token, bool):
            # use_auth_token=True means "use the locally stored HF token".
            auth_token = HfFolder().get_token()
        whoami = HfApi().whoami(auth_token)
        email = whoami["email"] if "email" in whoami else ""
        payload = {"email": email, "locale": locale, "dataset": bundle_version}
        requests.post(f"{_API_URL}/{locale}/downloaders", json=payload).json()

    def _split_generators(self, dl_manager):
        """Download/extract the locale archive and define train/test/validation splits."""
        hf_auth_token = dl_manager.download_config.use_auth_token
        if hf_auth_token is None:
            raise ConnectionError(
                "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
            )

        bundle_url_template = STATS["bundleURLTemplate"]
        # The first path component doubles as the release identifier,
        # e.g. "cv-corpus-9.0-2022-04-27".
        bundle_version = bundle_url_template.split("/")[0]
        # Keep the cache key stable: the resolved bundle URL presumably carries
        # transient query parameters (e.g. signing tokens) — ignore them.
        dl_manager.download_config.ignore_url_params = True

        self._log_download(self.config.name, bundle_version, hf_auth_token)
        archive_path = dl_manager.download(self._get_bundle_url(self.config.name, bundle_url_template))
        # In streaming mode nothing is extracted; clips are read straight from the tar.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None

        # Archive layout changed in CV 5.0: newer releases nest files under
        # "<bundle_version>/<locale>/".
        if self.config.version < datasets.Version("5.0.0"):
            path_to_data = ""
        else:
            path_to_data = "/".join([bundle_version, self.config.name])
        path_to_clips = "/".join([path_to_data, "clips"]) if path_to_data else "clips"


        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    # NOTE(review): hard-coded absolute path with a doubled
                    # slash, unlike test/dev below which join path_to_data.
                    # Confirm this local train.tsv override path is intended.
                    "metadata_filepath": "/data//train.tsv",
                    "path_to_clips": path_to_clips,
                    "mode":"train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": "/".join([path_to_data, "test.tsv"]) if path_to_data else "test.tsv",
                    "path_to_clips": path_to_clips,
                    "mode":"test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": "/".join([path_to_data, "dev.tsv"]) if path_to_data else "dev.tsv",
                    "path_to_clips": path_to_clips,
                    "mode":"dev",
                },
            ),
        ]

    def _generate_examples(
        self,
        local_extracted_archive,
        archive_iterator,
        metadata_filepath,
        path_to_clips,
        mode
    ):
        """Yield (key, example) pairs for one split.

        For "dev"/"test" the metadata TSV is read from the archive itself, so
        the TSV entry must precede the clips in tar order. For "train" the TSV
        is opened from `metadata_filepath` on the local filesystem, and only
        the clips are matched from the archive.
        """
        data_fields = list(self._info().features.keys())
        # Maps in-archive clip path -> its metadata row.
        metadata = {}
        metadata_found = False
        if mode in ["dev","test"]:
            for path, f in archive_iterator:
                if path == metadata_filepath:
                    metadata_found = True
                    lines = (line.decode("utf-8") for line in f)
                    reader = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for row in reader:
                        # set absolute path for mp3 audio file
                        if not row["path"].endswith(".mp3"):
                            row["path"] += ".mp3"
                        row["path"] = os.path.join(path_to_clips, row["path"])
                        # accent -> accents in CV 8.0
                        if "accents" in row:
                            row["accent"] = row["accents"]
                            del row["accents"]
                        # if data is incomplete, fill with empty values
                        for field in data_fields:
                            if field not in row:
                                row[field] = ""
                        metadata[row["path"]] = row
                elif path.startswith(path_to_clips):
                    # Relies on tar ordering: the TSV must appear before any clip.
                    # NOTE(review): assert is stripped under `python -O`.
                    assert metadata_found, "Found audio clips before the metadata TSV file."
                    if not metadata:
                        break
                    # Only clips listed in this split's TSV are yielded.
                    if path in metadata:
                        result = metadata[path]
                        # set the audio feature and the path to the extracted file
                        path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                        result["audio"] = {"path": path, "bytes": f.read()}
                        # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
                        result["path"] = path if local_extracted_archive else None

                        yield path, result
        else:
            # "train": metadata comes from a local TSV, not from the archive.
            metadata_found = True
            with open(metadata_filepath, "rb") as file_obj:
                lines = (line.decode("utf-8") for line in file_obj)
                reader = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
                for row in reader:
                    # set absolute path for mp3 audio file
                    if not row["path"].endswith(".mp3"):
                        row["path"] += ".mp3"
                    row["path"] = os.path.join(path_to_clips, row["path"])
                    # accent -> accents in CV 8.0
                    if "accents" in row:
                        row["accent"] = row["accents"]
                        del row["accents"]
                    # if data is incomplete, fill with empty values
                    for field in data_fields:
                        if field not in row:
                            row[field] = ""
                    metadata[row["path"]] = row
            # Second pass: stream clips from the archive and attach audio bytes.
            for path, f in archive_iterator:
                if path.startswith(path_to_clips):
                    # Always true here (metadata_found is set unconditionally above).
                    assert metadata_found, "Found audio clips before the metadata TSV file."
                    if not metadata:
                        break
                    if path in metadata:
                        result = metadata[path]
                        # set the audio feature and the path to the extracted file
                        path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                        result["audio"] = {"path": path, "bytes": f.read()}
                        # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
                        result["path"] = path if local_extracted_archive else None

                        yield path, result
data/README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: cc
3
- ---
 
 
 
 
languages.py ADDED
@@ -0,0 +1 @@
 
 
1
# Locale code -> English language name for every locale this script supports.
LANGUAGES = {"bn": "Bengali"}
release_stats.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Release statistics for Common Voice Corpus 9.0, trimmed to the Bengali ("bn")
# locale. Mirrors the structure of Mozilla's per-release stats, which the
# loading script reads for config metadata and the download bundle URL.
STATS = {
    # "{locale}" is substituted by the loader to form the per-locale tar path.
    "bundleURLTemplate": "cv-corpus-9.0-2022-04-27/cv-corpus-9.0-2022-04-27-{locale}.tar.gz",
    "date": "2022-04-27",
    "name": "Common Voice Corpus 9.0",
    "multilingual": True,
    "locales": {
        "bn": {
            # Total audio duration (apparently milliseconds: matches
            # clips * avgDurationSecs * 1000).
            "duration": 1438112808,
            "reportedSentences": 693,
            # Clip counts per validation bucket.
            "buckets": {
                "dev": 7748,
                "invalidated": 5844,
                "other": 192522,
                "reported": 717,
                "test": 7748,
                "train": 14503,
                "validated": 32754,
            },
            "clips": 231120,
            # Fractions of clips per demographic value ("" = not provided).
            "splits": {
                "accent": {"": 1},
                "age": {
                    "thirties": 0.02,
                    "twenties": 0.22,
                    "": 0.72,
                    "teens": 0.04,
                    "fourties": 0,
                },
                "gender": {"male": 0.24, "": 0.72, "female": 0.04, "other": 0},
            },
            "users": 19863,
            # Archive size in bytes and its SHA-256 checksum.
            "size": 8262390506,
            "checksum": "599a5f7c9e55a297928da390345a19180b279a1f013081e7255a657fc99f98d5",
            "avgDurationSecs": 6.222,
            "validDurationSecs": 203807.316,
            "totalHrs": 399.47,
            "validHrs": 56.61,
        },
    },
    # Corpus-wide totals across all locales of the upstream release.
    "totalDuration": 72782088097,
    "totalValidDurationSecs": 53904443,
    "totalHrs": 20217,
    "totalValidHrs": 14973,
    "version": "9.0.0",
}