| """Multilingual TTS Dataset in LJSpeech Format""" |
|
|
| import csv |
| import json |
| import os |
| import zipfile |
| from pathlib import Path |
|
|
| import datasets |
| from datasets import Features, Value, Audio |


_CITATION = """@dataset{multilingual_tts_ljspeech,
  title={Multilingual TTS Dataset in LJSpeech Format},
  year={2024},
  note={English: LibriTTS-R (CC BY 4.0), Chinese: AISHELL-3 (Apache 2.0)}
}
"""

_DESCRIPTION = """A high-quality multilingual Text-to-Speech dataset in LJSpeech format,
containing English and Chinese speech data suitable for commercial use.

This dataset combines:
- English: LibriTTS-R (~49 hours, 247 speakers, 32K utterances)
- Chinese: AISHELL-3 (~49 hours, 174 speakers, 63K utterances)

Total: ~97 hours, 421 speakers, 95K utterances.
All audio is normalized to 22050 Hz, 16-bit, mono WAV format.
"""

_HOMEPAGE = "https://huggingface.co/datasets/ayousanz/multi-dataset-v2"
_LICENSE = "Mixed: CC BY 4.0 (English), Apache 2.0 (Chinese)"
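
# Example usage (a minimal sketch; assumes this script is served from the Hub
# repository above, and newer `datasets` releases may require trust_remote_code=True):
#   from datasets import load_dataset
#   ds = load_dataset("ayousanz/multi-dataset-v2", split="train")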


class MultilingualTTSLJSpeech(datasets.GeneratorBasedBuilder):
    """Multilingual TTS Dataset in LJSpeech Format"""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = Features({
            "audio_id": Value("string"),
            "audio": Audio(sampling_rate=22050),
            "transcription": Value("string"),
            "normalized_text": Value("string"),
            "speaker_id": Value("string"),
            "language": Value("string"),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        files_to_download = {
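            # Pipe-delimited metadata CSVs, one per split.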
| "train_csv": "metadata/train.csv", |
| "validation_csv": "metadata/validation.csv", |
| "test_csv": "metadata/test.csv", |
| |
| |
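            # Audio archives, one per split and language.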
| "train_en_zip": "audio/train_english.zip", |
| "train_zh_zip": "audio/train_chinese.zip", |
| "validation_en_zip": "audio/validation_english.zip", |
| "validation_zh_zip": "audio/validation_chinese.zip", |
| "test_en_zip": "audio/test_english.zip", |
| "test_zh_zip": "audio/test_chinese.zip", |
| } |
| |
| downloaded_files = dl_manager.download(files_to_download) |
| |
| |
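
        # Extract each audio archive and remember its local directory,
        # keyed as "<split>_<language code>", e.g. "train_en".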
        extracted_dirs = {}
        for split in ["train", "validation", "test"]:
            for lang in ["en", "zh"]:
                key = f"{split}_{lang}_zip"
                if key in downloaded_files:
                    extracted_dirs[f"{split}_{lang}"] = dl_manager.extract(downloaded_files[key])
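
        # One SplitGenerator per split; each receives the full extracted_dirs mapping.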
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_path": downloaded_files["train_csv"],
                    "extracted_dirs": extracted_dirs,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "metadata_path": downloaded_files["validation_csv"],
                    "extracted_dirs": extracted_dirs,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "metadata_path": downloaded_files["test_csv"],
                    "extracted_dirs": extracted_dirs,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, metadata_path, extracted_dirs, split):
        """Yields examples."""
        with open(metadata_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="|")

            for idx, row in enumerate(reader):
                audio_id = row["audio_id"]
                language = row["language"]
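                # Assumes the CSV "language" column uses the same codes as the
                # archive keys above ("en" / "zh") and that each archive unpacks
                # to <split>/<language>/wavs/<audio_id>.wav.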
                audio_dir_key = f"{split}_{language}"
                if audio_dir_key in extracted_dirs:
                    audio_path = Path(extracted_dirs[audio_dir_key]) / split / language / "wavs" / f"{audio_id}.wav"
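                    # Skip rows whose audio file is missing from the extracted archive.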
                    if audio_path.exists():
                        yield idx, {
                            "audio_id": audio_id,
                            "audio": str(audio_path),
                            "transcription": row["transcription"],
                            "normalized_text": row["normalized_text"],
                            "speaker_id": row["speaker_id"],
                            "language": language,
                        }