Update asr_sundanese_2_hub.py
Browse files — asr_sundanese_2_hub.py (+35 −41)
asr_sundanese_2_hub.py
CHANGED
|
@@ -114,57 +114,51 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
|
|
| 114 |
)
|
| 115 |
|
| 116 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
| 117 |
-
"""Returns SplitGenerators."""
|
| 118 |
-
# As the data is already split manually, we only need to download and pass the paths
|
| 119 |
-
train_path = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
|
| 120 |
-
val_path = dl_manager.download_and_extract(_URLs["su_id_asr_val"])
|
| 121 |
-
test_path = dl_manager.download_and_extract(_URLs["su_id_asr_test"])
|
| 122 |
-
|
| 123 |
return [
|
| 124 |
datasets.SplitGenerator(
|
| 125 |
name=datasets.Split.TRAIN,
|
| 126 |
-
gen_kwargs={"filepath":
|
| 127 |
),
|
| 128 |
datasets.SplitGenerator(
|
| 129 |
name=datasets.Split.VALIDATION,
|
| 130 |
-
gen_kwargs={"filepath":
|
| 131 |
),
|
| 132 |
datasets.SplitGenerator(
|
| 133 |
name=datasets.Split.TEST,
|
| 134 |
-
gen_kwargs={"filepath":
|
| 135 |
-
)
|
| 136 |
]
|
| 137 |
|
| 138 |
def _generate_examples(self, filepath: Path):
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
f.close()
|
|
|
|
| 114 |
)
|
| 115 |
|
| 116 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    """Download/extract each pre-defined split and hand its local path(s) on.

    The data ships already split, so no slicing is done here: each split
    just downloads its own URL entry and forwards the extracted location
    to ``_generate_examples`` via ``gen_kwargs``.
    """
    # (HF split name, key into the module-level _URLs table), in the same
    # order the original list literal evaluated them: train, val, test.
    split_specs = [
        (datasets.Split.TRAIN, "su_id_asr_train"),
        (datasets.Split.VALIDATION, "su_id_asr_val"),
        (datasets.Split.TEST, "su_id_asr_test"),
    ]
    return [
        datasets.SplitGenerator(
            name=split_name,
            # NOTE(review): download_and_extract mirrors the structure of
            # _URLs[url_key] — presumably a dict of shard URLs, since
            # _generate_examples iterates filepath.items(); confirm against
            # the _URLs definition earlier in the file.
            gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs[url_key])},
        )
        for split_name, url_key in split_specs
    ]
|
| 131 |
|
| 132 |
def _generate_examples(self, filepath: Path):
    """Yield (audio_id, example) pairs for one split.

    Args:
        filepath: mapping of shard name -> extracted directory. Each
            directory is expected to hold an ``utt_spk_text.tsv`` manifest
            (tab-separated: utterance id, speaker id, transcript) and a
            ``data/`` folder of ``<audio_id>.flac`` files.
            NOTE(review): the ``Path`` annotation is misleading — the code
            iterates ``filepath.items()``, so a dict is required; kept
            as-is to preserve the signature.

    Yields:
        ``(audio_id, example_dict)`` — the example shape depends on
        ``self.config.schema`` ("source" or "seacrowd_sptext").
    """
    for _shard_name, shard_dir in filepath.items():
        manifest_path = os.path.join(shard_dir, "utt_spk_text.tsv")
        # `with` closes the manifest automatically; the original's trailing
        # f.close() was redundant and raised NameError when `filepath` was
        # an empty mapping (no `f` ever bound). It is removed here.
        with open(manifest_path, "r") as manifest:
            for row in csv.reader(manifest, delimiter="\t"):
                audio_id, sp_id, text = row[0], row[1], row[2]
                wav_path = os.path.join(shard_dir, "data", "{}.flac".format(audio_id))

                # Skip manifest entries whose audio file is missing on disk.
                if not os.path.exists(wav_path):
                    continue

                # Fields common to both schemas; built once instead of
                # duplicating the dict literal per branch.
                ex = {
                    "id": audio_id,
                    "speaker_id": sp_id,
                    "path": wav_path,
                    "audio": wav_path,
                    "text": text,
                }
                if self.config.schema == "source":
                    yield audio_id, ex
                elif self.config.schema == "seacrowd_sptext":
                    # Age/gender are not provided by this corpus.
                    ex["metadata"] = {
                        "speaker_age": None,
                        "speaker_gender": None,
                    }
                    yield audio_id, ex
|