|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" OpenSLR Dataset""" |
|
|
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
|
|
import os |
|
|
import re |
|
|
from pathlib import Path |
|
|
|
|
|
import datasets |
|
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
|
|
|
|
|
# Template for resource download URLs; the placeholder is the numeric SLR id
# (e.g. "70" for SLR70).
_DATA_URL = "https://openslr.org/resources/{}"
|
|
|
|
|
_CITATION = """\ |
|
|
SLR70, SLR71: |
|
|
@inproceedings{guevara-rukoz-etal-2020-crowdsourcing, |
|
|
title = {{Crowdsourcing Latin American Spanish for Low-Resource Text-to-Speech}}, |
|
|
author = {Guevara-Rukoz, Adriana and Demirsahin, Isin and He, Fei and Chu, Shan-Hui Cathy and Sarin, |
|
|
Supheakmungkol and Pipatsrisawat, Knot and Gutkin, Alexander and Butryna, Alena and Kjartansson, Oddur}, |
|
|
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)}, |
|
|
year = {2020}, |
|
|
month = may, |
|
|
address = {Marseille, France}, |
|
|
publisher = {European Language Resources Association (ELRA)}, |
|
|
url = {https://www.aclweb.org/anthology/2020.lrec-1.801}, |
|
|
pages = {6504--6513}, |
|
|
ISBN = {979-10-95546-34-4}, |
|
|
} |
|
|
|
|
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
|
OpenSLR is a site devoted to hosting speech and language resources, such as training corpora for speech recognition, |
|
|
and software related to speech recognition. We intend to be a convenient place for anyone to put resources that |
|
|
they have created, so that they can be downloaded publicly. |
|
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://openslr.org/"

# No single license string: each SLR resource carries its own license on its
# openslr.org page.
_LICENSE = ""
|
|
|
|
|
# Metadata for each supported OpenSLR resource, keyed by its SLR id.
# "Files", "IndexFiles" and "DataDirs" are parallel lists: entry i of each
# names one downloadable archive, the transcript index file inside it, and the
# directory (relative to the extracted archive root) that holds the audio.
_RESOURCES = {
    "SLR70": {
        "Language": "Nigerian English",
        "LongName": "Crowdsourced high-quality Nigerian English speech data set",
        "Category": "Speech",
        "Summary": "Data set which contains recordings of Nigerian English",
        "Files": ["en_ng_female.zip", "en_ng_male.zip"],
        "IndexFiles": ["line_index.tsv", "line_index.tsv"],
        "DataDirs": ["", ""],
    },
    "SLR71": {
        "Language": "Chilean Spanish",
        "LongName": "Crowdsourced high-quality Chilean Spanish speech data set",
        "Category": "Speech",
        "Summary": "Data set which contains recordings of Chilean Spanish",
        "Files": ["es_cl_female.zip", "es_cl_male.zip"],
        "IndexFiles": ["line_index.tsv", "line_index.tsv"],
        "DataDirs": ["", ""],
    },
}
|
|
|
|
|
|
|
|
class OpenSlrConfig(datasets.BuilderConfig):
    """BuilderConfig for one OpenSLR resource."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, the SLR resource id, e.g. "SLR70".
            **kwargs: keyword arguments forwarded to super after popping the
                resource metadata keys below:
                language: `string`, human-readable language of the recordings.
                long_name: `string`, full resource title from openslr.org.
                category: `string`, resource category (e.g. "Speech").
                summary: `string`, one-line description of the resource.
                files: `list` of archive file names to download.
                index_files: `list` of transcript index files, parallel to `files`.
                data_dirs: `list` of audio directories, parallel to `files`.
        """
        self.language = kwargs.pop("language", None)
        self.long_name = kwargs.pop("long_name", None)
        self.category = kwargs.pop("category", None)
        self.summary = kwargs.pop("summary", None)
        self.files = kwargs.pop("files", None)
        self.index_files = kwargs.pop("index_files", None)
        self.data_dirs = kwargs.pop("data_dirs", None)
        # BUG FIX: use the `name` argument here. `self.name` is only assigned
        # by super().__init__(), so reading it at this point either raised
        # AttributeError or picked up the base class's "default" placeholder.
        description = (
            f"Open Speech and Language Resources dataset in {self.language}. Name: {name}, "
            f"Summary: {self.summary}."
        )
        super(OpenSlrConfig, self).__init__(name=name, description=description, **kwargs)
|
|
|
|
|
|
|
|
class OpenSlr(datasets.GeneratorBasedBuilder):
    """Builder that downloads an OpenSLR resource and yields speech examples.

    Each example is {"path": <wav path>, "audio": <wav path>, "sentence": <transcript>}.
    """

    DEFAULT_WRITER_BATCH_SIZE = 32

    # One config per SLR resource id declared in _RESOURCES.
    BUILDER_CONFIGS = [
        OpenSlrConfig(
            name=resource_id,
            language=resource["Language"],
            long_name=resource["LongName"],
            category=resource["Category"],
            summary=resource["Summary"],
            files=resource["Files"],
            index_files=resource["IndexFiles"],
            data_dirs=resource["DataDirs"],
        )
        for resource_id, resource in _RESOURCES.items()
    ]

    def _info(self):
        """Return dataset metadata: features, citation, and the ASR task template."""
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                # OpenSLR crowdsourced corpora are distributed as 48 kHz wavs.
                "audio": datasets.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")],
        )

    def _split_generators(self, dl_manager):
        """Download the configured archives and return a single TRAIN split."""
        resource_number = self.config.name.replace("SLR", "")
        urls = [f"{_DATA_URL.format(resource_number)}/{file}" for file in self.config.files]
        if urls[0].endswith(".zip"):
            # Zip archives are downloaded and extracted; index files and audio
            # then live inside the extracted directories.
            dl_paths = dl_manager.download_and_extract(urls)
            path_to_indexs = [os.path.join(path, self.config.index_files[i]) for i, path in enumerate(dl_paths)]
            path_to_datas = [os.path.join(path, self.config.data_dirs[i]) for i, path in enumerate(dl_paths)]
            archives = None
        else:
            # Non-zip archives stay packed and are exposed via iter_archive;
            # the index files are downloaded separately.
            archives = dl_manager.download(urls)
            path_to_indexs = dl_manager.download(self.config.index_files)
            path_to_datas = self.config.data_dirs
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path_to_indexs": path_to_indexs,
                    "path_to_datas": path_to_datas,
                    "archive_files": [dl_manager.iter_archive(archive) for archive in archives] if archives else None,
                },
            ),
        ]

    def _generate_examples(self, path_to_indexs, path_to_datas, archive_files):
        """Yield (key, example) pairs read from the tab-separated index files.

        Args:
            path_to_indexs: list of paths to line_index.tsv-style files.
            path_to_datas: list of directories containing the .wav files,
                parallel to `path_to_indexs`.
            archive_files: packed-archive iterators from _split_generators;
                currently unused — all resources in _RESOURCES ship as zips.
        """
        counter = -1
        for i, path_to_index in enumerate(path_to_indexs):
            with open(path_to_index, encoding="utf-8") as f:
                lines = f.readlines()
            for line in lines:
                # Some OpenSLR index files carry a middle column (presumably a
                # speaker id — TODO confirm); collapse it so every row reduces
                # to "<filename>\t<sentence>".
                line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
                field_values = re.split(r"\t\t?", line)
                if len(field_values) != 2:
                    # Skip malformed rows instead of failing the whole build.
                    continue
                filename, sentence = field_values
                # BUG FIX: build the audio path from the filename parsed out of
                # the index row; the previous code joined a literal placeholder,
                # so every example pointed at a nonexistent file.
                path = os.path.join(path_to_datas[i], f"{filename}.wav")
                counter += 1
                yield counter, {"path": path, "audio": path, "sentence": sentence}
|
|
|
|
|
|