MajorTom committed (verified)
Commit 1cf9931 · Parent: 8865531

Create mixpopuli.py

Browse files
Files changed (1)
  1. mixpopuli.py +202 -0
mixpopuli.py ADDED
@@ -0,0 +1,202 @@
+from collections import defaultdict
+import os
+import json
+import csv
+
+import datasets
+
+
+_DESCRIPTION = """
+A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation.
+"""
+
+_CITATION = """
+@inproceedings{wang-etal-2021-voxpopuli,
+    title = "{V}ox{P}opuli: A Large-Scale Multilingual Speech Corpus for Representation Learning,
+    Semi-Supervised Learning and Interpretation",
+    author = "Wang, Changhan  and
+      Riviere, Morgane  and
+      Lee, Ann  and
+      Wu, Anne  and
+      Talnikar, Chaitanya  and
+      Haziza, Daniel  and
+      Williamson, Mary  and
+      Pino, Juan  and
+      Dupoux, Emmanuel",
+    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics
+    and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
+    month = aug,
+    year = "2021",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.acl-long.80",
+    doi = "10.18653/v1/2021.acl-long.80",
+    pages = "993--1003",
+}
+"""
+
+_HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"
+
+_LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"
+
+_ASR_LANGUAGES = [
+    "en", "de", "fr", "ro", "es"
+]
+
+
+_LANGUAGES = _ASR_LANGUAGES
+
+_BASE_DATA_DIR = "data/"
+
+_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
+
+_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar.gz"
+
+_METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.tsv"
+
+
+class VoxpopuliConfig(datasets.BuilderConfig):
+    """BuilderConfig for VoxPopuli."""
+
+    def __init__(self, name, languages="all", **kwargs):
+        """
+        Args:
+            name: `string` or `List[string]`:
+                name of a config: either one of the supported languages or "multilang" for many languages.
+                By default, the "multilang" config includes all languages, including accented ones.
+                To specify a custom set of languages, pass them to the `languages` parameter.
+            languages: `List[string]`: if the config is "multilang", either "all" for all available languages,
+                excluding accented ones (default), or a custom list of languages.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        if name == "multilang":
+            self.languages = _ASR_LANGUAGES if languages == "all" else languages
+            name = "multilang" if languages == "all" else "_".join(languages)
+        else:
+            self.languages = [name]
+
+        super().__init__(name=name, **kwargs)
+
+
+class Voxpopuli(datasets.GeneratorBasedBuilder):
+    """The VoxPopuli dataset."""
+
+    VERSION = datasets.Version("1.3.0")
+    BUILDER_CONFIGS = [
+        VoxpopuliConfig(
+            name=name,
+            version=datasets.Version("1.3.0"),
+        )
+        for name in _LANGUAGES + ["multilang"]
+    ]
+    DEFAULT_WRITER_BATCH_SIZE = 256
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "audio_id": datasets.Value("string"),
+                "language": datasets.ClassLabel(names=_LANGUAGES),
+                "audio": datasets.Audio(sampling_rate=16_000),
+                "raw_text": datasets.Value("string"),
+                "normalized_text": datasets.Value("string"),
+                "gender": datasets.Value("string"),  # TODO: ClassLabel?
+                "speaker_id": datasets.Value("string"),
+                "is_gold_transcript": datasets.Value("bool"),
+                "accent": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
+        with open(n_shards_path) as f:
+            n_shards = json.load(f)
+        splits = ["train", "dev", "test"]
+
+        audio_urls = defaultdict(dict)
+        for split in splits:
+            for lang in self.config.languages:
+                audio_urls[split][lang] = [
+                    _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])
+                ]
+
+        meta_urls = defaultdict(dict)
+        for split in splits:
+            for lang in self.config.languages:
+                meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
+
+        # dl_manager.download_config.num_proc = len(urls)
+
+        meta_paths = dl_manager.download_and_extract(meta_urls)
+        audio_paths = dl_manager.download(audio_urls)
+
+        # In streaming mode the archives are read on the fly and never extracted
+        # to disk, so the local extracted paths are placeholders (None).
+        local_extracted_audio_paths = (
+            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+            {
+                split: {lang: [None] * len(audio_paths[split][lang]) for lang in self.config.languages} for split in splits
+            }
+        )
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["train"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
+                    "metadata_paths": meta_paths["train"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["dev"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
+                    "metadata_paths": meta_paths["dev"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["test"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["test"],
+                    "metadata_paths": meta_paths["test"],
+                }
+            ),
+        ]
+
+    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
+        assert len(metadata_paths) == len(audio_archives) == len(local_extracted_archives_paths)
+        features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
+
+        for lang in self.config.languages:
+            assert len(audio_archives[lang]) == len(local_extracted_archives_paths[lang])
+
+            meta_path = metadata_paths[lang]
+            with open(meta_path) as f:
+                metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+            for audio_archive, local_extracted_archive_path in zip(audio_archives[lang], local_extracted_archives_paths[lang]):
+                for audio_filename, audio_file in audio_archive:
+                    # Archive members are named "<audio_id>.wav"; strip the directory prefix and extension.
+                    audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
+                    path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
+
+                    yield audio_id, {
+                        "audio_id": audio_id,
+                        "language": lang,
+                        **{feature: metadata[audio_id][feature] for feature in features},
+                        "audio": {"path": path, "bytes": audio_file.read()},
+                    }
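
A loading script like this is normally consumed through datasets.load_dataset. Below is a minimal usage sketch, not part of the commit: the repo id "namespace/mixpopuli" is a placeholder for wherever this script is hosted, and trust_remote_code=True is assumed to be required on recent versions of the datasets library for script-based datasets.

import datasets

# "namespace/mixpopuli" is a hypothetical repo id -- substitute the actual location of mixpopuli.py.
ds = datasets.load_dataset(
    "namespace/mixpopuli",
    "en",                    # config name: one of "en", "de", "fr", "ro", "es", or "multilang"
    split="train",
    streaming=True,          # the script supports streaming (see the is_streaming branch above)
    trust_remote_code=True,
)

# Each example carries the features declared in _info(); the Audio feature
# decodes the wav bytes to a 16 kHz array on access.
sample = next(iter(ds))
print(sample["audio_id"], sample["normalized_text"])

For a custom language subset, extra keyword arguments to load_dataset are forwarded to VoxpopuliConfig, e.g. the "multilang" config together with languages=["en", "fr"].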