damilola2104 committed on
Commit
bd95caa
·
1 Parent(s): dd01f19

Delete Openslr.py

Browse files
Files changed (1) hide show
  1. Openslr.py +0 -189
Openslr.py DELETED
@@ -1,189 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ OpenSLR Dataset"""
16
-
17
- from __future__ import absolute_import, division, print_function
18
-
19
- import os
20
- import re
21
- from pathlib import Path
22
-
23
- import datasets
24
- from datasets.tasks import AutomaticSpeechRecognition
25
-
26
-
27
# Template URL for OpenSLR downloads; formatted with the numeric resource id.
_DATA_URL = "https://openslr.org/resources/{}"

# BibTeX citation covering the resources exposed by this script.
_CITATION = """\
SLR70, SLR71:
@inproceedings{guevara-rukoz-etal-2020-crowdsourcing,
title = {{Crowdsourcing Latin American Spanish for Low-Resource Text-to-Speech}},
author = {Guevara-Rukoz, Adriana and Demirsahin, Isin and He, Fei and Chu, Shan-Hui Cathy and Sarin,
Supheakmungkol and Pipatsrisawat, Knot and Gutkin, Alexander and Butryna, Alena and Kjartansson, Oddur},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
year = {2020},
month = may,
address = {Marseille, France},
publisher = {European Language Resources Association (ELRA)},
url = {https://www.aclweb.org/anthology/2020.lrec-1.801},
pages = {6504--6513},
ISBN = {979-10-95546-34-4},
}

"""

_DESCRIPTION = """\
OpenSLR is a site devoted to hosting speech and language resources, such as training corpora for speech recognition,
and software related to speech recognition. We intend to be a convenient place for anyone to put resources that
they have created, so that they can be downloaded publicly.
"""

_HOMEPAGE = "https://openslr.org/"

_LICENSE = ""

# Metadata for each supported OpenSLR resource, keyed by resource id.
# "Files"/"IndexFiles"/"DataDirs" are parallel lists: one entry per archive.
_RESOURCES = {
    "SLR70": {
        "Language": "Nigerian English",
        "LongName": "Crowdsourced high-quality Nigerian English speech data set",
        "Category": "Speech",
        "Summary": "Data set which contains recordings of Nigerian English",
        "Files": ["en_ng_female.zip", "en_ng_male.zip"],
        "IndexFiles": ["line_index.tsv", "line_index.tsv"],
        "DataDirs": ["", ""],
    },
    "SLR71": {
        "Language": "Chilean Spanish",
        "LongName": "Crowdsourced high-quality Chilean Spanish speech data set",
        "Category": "Speech",
        "Summary": "Data set which contains recordings of Chilean Spanish",
        "Files": ["es_cl_female.zip", "es_cl_male.zip"],
        "IndexFiles": ["line_index.tsv", "line_index.tsv"],
        "DataDirs": ["", ""],
    },
}
81
-
82
-
83
class OpenSlrConfig(datasets.BuilderConfig):
    """BuilderConfig for OpenSlr.

    Metadata attributes (popped from **kwargs, all optional):
        language: `str`, human-readable language of the resource.
        long_name: `str`, full display name of the data set.
        category: `str`, resource category (e.g. "Speech").
        summary: `str`, short description of the data set.
        files: `list[str]`, archive file names to download.
        index_files: `list[str]`, transcription index file per archive.
        data_dirs: `list[str]`, audio directory per archive.
    """

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of the configuration (e.g. "SLR70").
            **kwargs: keyword arguments forwarded to super.
        """
        self.language = kwargs.pop("language", None)
        self.long_name = kwargs.pop("long_name", None)
        self.category = kwargs.pop("category", None)
        self.summary = kwargs.pop("summary", None)
        self.files = kwargs.pop("files", None)
        self.index_files = kwargs.pop("index_files", None)
        self.data_dirs = kwargs.pop("data_dirs", None)
        # BUG FIX: use the local `name` argument here. `self.name` is not
        # assigned until super().__init__ runs, so reading it at this point
        # falls back to the BuilderConfig class default instead of this
        # configuration's actual name.
        description = (
            f"Open Speech and Language Resources dataset in {self.language}. Name: {name}, "
            f"Summary: {self.summary}."
        )
        super(OpenSlrConfig, self).__init__(name=name, description=description, **kwargs)
107
-
108
-
109
class OpenSlr(datasets.GeneratorBasedBuilder):
    """Generator-based builder for the OpenSLR resources declared in _RESOURCES."""

    # Audio examples are large; a small writer batch bounds memory use.
    DEFAULT_WRITER_BATCH_SIZE = 32

    # One configuration per SLR resource id (e.g. "SLR70", "SLR71").
    BUILDER_CONFIGS = [
        OpenSlrConfig(
            name=resource_id,
            language=_RESOURCES[resource_id]["Language"],
            long_name=_RESOURCES[resource_id]["LongName"],
            category=_RESOURCES[resource_id]["Category"],
            summary=_RESOURCES[resource_id]["Summary"],
            files=_RESOURCES[resource_id]["Files"],
            index_files=_RESOURCES[resource_id]["IndexFiles"],
            data_dirs=_RESOURCES[resource_id]["DataDirs"],
        )
        for resource_id in _RESOURCES
    ]

    def _info(self):
        """Return DatasetInfo: path/audio/sentence features, 48 kHz audio, ASR task template."""
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators (a single TRAIN split per resource)."""
        resource_number = self.config.name.replace("SLR", "")
        urls = [f"{_DATA_URL.format(resource_number)}/{file}" for file in self.config.files]
        if urls[0].endswith(".zip"):
            # Zip resources are downloaded and extracted; indexes and audio
            # are then plain local files.
            dl_paths = dl_manager.download_and_extract(urls)
            path_to_indexs = [os.path.join(path, f"{self.config.index_files[i]}") for i, path in enumerate(dl_paths)]
            path_to_datas = [os.path.join(path, f"{self.config.data_dirs[i]}") for i, path in enumerate(dl_paths)]
            archives = None
        else:
            # Non-zip resources stay packed and are streamed via iter_archive.
            archives = dl_manager.download(urls)
            path_to_indexs = dl_manager.download(self.config.index_files)
            path_to_datas = self.config.data_dirs

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_indexs": path_to_indexs,
                    "path_to_datas": path_to_datas,
                    "archive_files": [dl_manager.iter_archive(archive) for archive in archives] if archives else None,
                },
            ),
        ]

    def _generate_examples(self, path_to_indexs, path_to_datas, archive_files):
        """Yields (key, example) pairs parsed from tab-separated index files.

        Args:
            path_to_indexs: list of local paths to line_index.tsv files.
            path_to_datas: list of directories containing the .wav files,
                parallel to `path_to_indexs`.
            archive_files: iterators over un-extracted archives; currently
                unused (present for the non-zip path in _split_generators).
        """
        counter = -1
        for i, path_to_index in enumerate(path_to_indexs):
            with open(path_to_index, encoding="utf-8") as f:
                lines = f.readlines()
            for line in lines:
                # Following regexs are needed to normalise the lines, since
                # the datasets are not always consistent and have bugs: drop
                # a spurious middle column and tolerate double tabs.
                line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
                field_values = re.split(r"\t\t?", line)
                if len(field_values) != 2:
                    # Skip malformed rows instead of failing the whole build.
                    continue
                filename, sentence = field_values
                # BUG FIX: build the absolute audio path from the row's file
                # name. Previously a literal placeholder string was joined
                # instead, producing the same nonexistent path for every
                # example while `filename` went unused.
                path = os.path.join(path_to_datas[i], f"{filename}.wav")
                counter += 1
                yield counter, {"path": path, "audio": path, "sentence": sentence}
189
-