Srinath N Ramalingam committed on
Commit
8d7d253
·
1 Parent(s): 106eaf4

Add HF dataset loading script

Browse files
Files changed (1) hide show
  1. tts_dataset.py +52 -0
tts_dataset.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ import datasets
4
+
5
+ _CITATION = ""
6
+ _DESCRIPTION = (
7
+ "Phonemized-VCTK: audio + phonemes for TTS/alignment, paired by basename."
8
+ )
9
+
10
class TTSDataset(datasets.GeneratorBasedBuilder):
    """Phonemized-VCTK builder.

    Pairs audio files under ``<root>/wav`` with phoneme transcripts under
    ``<root>/phonemized``, matching them by file basename (stem).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: features schema, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "speaker": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "phoneme": datasets.Sequence(datasets.Value("string")),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/srinathnr/TTS_DATASET",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TRAIN split rooted at the manual data dir.

        Falls back to the current working directory when no ``manual_dir``
        was passed to ``load_dataset``.
        """
        root = Path(dl_manager.manual_dir or ".")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"root_dir": root},
            )
        ]

    def _generate_examples(self, root_dir):
        """Yield ``(key, example)`` pairs for every basename found in BOTH
        ``<root>/wav/**/*.wav`` and ``<root>/phonemized/**/*.txt``.

        macOS AppleDouble sidecar files (``._*``) are skipped. Keys are
        sorted so example order is deterministic across runs.
        """
        wav_dir = Path(root_dir) / "wav"
        phon_dir = Path(root_dir) / "phonemized"
        audio_files = {
            p.stem: p for p in wav_dir.rglob("*.wav") if not p.name.startswith("._")
        }
        phoneme_files = {
            p.stem: p for p in phon_dir.rglob("*.txt") if not p.name.startswith("._")
        }
        # Only basenames present on both sides are emitted (intersection).
        for key in sorted(set(audio_files) & set(phoneme_files)):
            audio_path = audio_files[key]
            phoneme_path = phoneme_files[key]
            # NOTE(review): assumes the VCTK layout wav/<speaker>/<utt>.wav,
            # so the parent directory name (e.g. "p225") is the speaker id.
            # For a flat wav/ layout this would yield "wav" — confirm layout.
            speaker = audio_path.parts[-2]
            # Fix: read transcripts as UTF-8 explicitly. The original used
            # open() with the platform-default encoding, which corrupts
            # non-ASCII phoneme symbols (e.g. IPA) on non-UTF-8 locales.
            phonemes = phoneme_path.read_text(encoding="utf-8").split()
            yield key, {
                "speaker": speaker,
                "audio": str(audio_path),
                "phoneme": phonemes,
            }