|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""AudioData dataset.""" |
|
|
|
|
|
|
|
|
import os |
|
|
from pathlib import Path |
|
|
|
|
|
import datasets |
|
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
|
|
|
|
|
# BibTeX citation for the dataset card.
# NOTE(review): placeholder entry — "@inproceedings{" lacks a BibTeX citation
# key and the author/year fields are template values; fill these in before release.
_CITATION = """\


@inproceedings{


title={AudioData Speech Corpus},


author={Your Name},


year={Year}


}


"""




# Human-readable summary shown on the dataset card / hub page.
_DESCRIPTION = """\


The AudioData corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies


and for the evaluation of automatic speech recognition systems.


More info on AudioData dataset can be understood from the "README" which can be found here:


https://example.com/path/to/readme.txt


"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AudioDataConfig(datasets.BuilderConfig):
    """BuilderConfig for the AudioData corpus."""

    def __init__(self, **kwargs):
        """Create an AudioData builder config.

        Args:
            **kwargs: forwarded to ``datasets.BuilderConfig`` — typically
                ``name``, ``description``, ``data_dir`` (path to the folder
                containing the audio files), ``citation`` and ``url``.
        """
        # The config version is pinned here; everything else is caller-supplied.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
|
|
|
|
|
|
|
|
class AudioData(datasets.GeneratorBasedBuilder):
    """AudioData dataset builder.

    Expects a manually supplied ``data_dir`` containing ``train`` and ``test``
    subfolders with ``.wav`` audio files, each accompanied by a ``.txt`` (or
    ``.TXT``) transcript file of the same stem.
    """

    BUILDER_CONFIGS = [AudioDataConfig(name="clean", description="'Clean' speech.")]

    def _info(self):
        """Return dataset metadata.

        NOTE: the feature keys here must match the dicts yielded by
        ``_generate_examples`` ("file", "audio", "text"). The previous
        declaration used "folder"/"label", which the generator never emitted,
        so examples could not be cast to the declared features.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Return train/test split generators rooted at the manual data dir.

        Raises:
            FileNotFoundError: if no manual dir was supplied, or it does not exist.
        """
        # Guard first: expanduser(None) would raise an opaque TypeError.
        if dl_manager.manual_dir is None:
            raise FileNotFoundError(
                "No manual data directory supplied. Pass `data_dir=...` to `datasets.load_dataset`. "
                f"Manual download instructions: {self.manual_download_instructions}"
            )
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via "
                "`datasets.load_dataset(..., data_dir=...)` that contains the unzipped AudioData files. "
                f"Manual download instructions: {self.manual_download_instructions}"
            )
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
        ]

    def _generate_examples(self, split, data_dir):
        """Yield (key, example) pairs for every .wav file under the split folder."""
        wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
        for key, wav_path in enumerate(wav_paths):
            # The transcript sits next to the wav as .txt or .TXT; its first
            # line is "<start> <end> <words...>", so the two offset tokens
            # are dropped before joining the words.
            txt_path = with_case_insensitive_suffix(wav_path, ".txt")
            with txt_path.open(encoding="utf-8") as txt_file:
                transcript = " ".join(txt_file.readline().split()[2:])
            yield key, {
                "file": str(wav_path),
                "audio": str(wav_path),
                "text": transcript,
            }
|
|
|
|
|
|
|
|
def with_case_insensitive_suffix(path: Path, suffix: str) -> Path:
    """Return *path* with *suffix* in lowercase if that file exists, else uppercase.

    The uppercase variant is returned as the fallback whether or not it exists
    on disk — callers are expected to handle a missing file themselves.
    """
    lowered = path.with_suffix(suffix.lower())
    if lowered.exists():
        return lowered
    return lowered.with_suffix(suffix.upper())
|
|
|