import csv
import os
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (
    DEFAULT_SEACROWD_VIEW_NAME,
    DEFAULT_SOURCE_VIEW_NAME,
    Tasks,
)

_DATASETNAME = "su_id_asr"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["sun"]
_LOCAL = False
_CITATION = """\
@inproceedings{sodimana18_sltu,
  author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
  title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
  year=2018,
  booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
  pages={66--70},
  doi={10.21437/SLTU.2018-14}
}
"""

_DESCRIPTION = """\
Sundanese ASR training dataset containing ~220K utterances.
This dataset was collected by Google in Indonesia.
"""

_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"

_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLs = {
    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ESbYerhrepxPsggILmK8hZwB9ywXeZzLX7fF885Yo9F7JA",
    "su_id_asr_dev": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=EdmZ2KYglRBJrKacGRklGD4BEcZXqY6txIrEhj2csx3I3g",
    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/personal/patrick_samuel_office_ui_ac_id/_layouts/15/download.aspx?share=ET_Yu0vwbk9Mu-2vg68mSnkBJ-CnY1DOBjm8GVjGLKFZxQ",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class SuIdASR(datasets.GeneratorBasedBuilder):
    """su_id_asr contains ~220K utterances of Sundanese ASR training data."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="su_id_asr_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="SU_ID_ASR source schema",
            schema="source",
            subset_id="su_id_asr",
        ),
        SEACrowdConfig(
            name="su_id_asr_seacrowd_sptext",
            version=datasets.Version(_SEACROWD_VERSION),
            description="SU_ID_ASR SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id="su_id_asr",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_id_asr_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        base_path_train = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
        base_path_validation = dl_manager.download_and_extract(_URLs["su_id_asr_dev"])
        base_path_test = dl_manager.download_and_extract(_URLs["su_id_asr_test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": base_path_train, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": base_path_validation, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": base_path_test, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: str, split: str):
        # Each split archive is expected to contain a tab-separated index file
        # whose rows are (utterance id, speaker id, transcription), with the
        # audio stored as FLAC files named after the utterance id.
        tsv_file = os.path.join(filepath, "utt_spk_text.tsv")

        if not os.path.exists(tsv_file):
            raise FileNotFoundError(f"TSV file not found at: {tsv_file}")

        with open(tsv_file, "r", encoding="utf-8") as file:
            tsv_reader = csv.reader(file, delimiter="\t")

            for line in tsv_reader:
                if len(line) < 3:
                    # Skip empty or malformed rows.
                    continue
                audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
                wav_path = os.path.join(filepath, "{}.flac".format(audio_id))

                if os.path.exists(wav_path):
                    ex = {
                        "id": audio_id,
                        "speaker_id": speaker_id,
                        "path": wav_path,
                        "audio": wav_path,
                        "text": transcription_text,
                    }
                    if self.config.schema == "seacrowd_sptext":
                        ex["metadata"] = {
                            "speaker_age": None,
                            "speaker_gender": None,
                        }
                    yield audio_id, ex
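# A minimal usage sketch (illustrative only, not part of the loader itself). It
# assumes this script is saved locally, e.g. as su_id_asr.py, and that the
# download URLs above are reachable; recent versions of the `datasets` library
# may additionally require trust_remote_code=True when loading a local script.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="su_id_asr_source", split="train")
    print(ds)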