jineeuslab committed on
Commit
8e39987
·
verified ·
1 Parent(s): cb24793

Update asr_sundanese_2.py

Browse files
Files changed (1) hide show
  1. asr_sundanese_2.py +36 -30
asr_sundanese_2.py CHANGED
@@ -15,40 +15,42 @@
15
 
16
  import csv
17
  import os
18
- from typing import Dict, List
 
19
 
20
  import datasets
21
 
22
  from seacrowd.utils import schemas
23
  from seacrowd.utils.configs import SEACrowdConfig
24
- from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
25
- DEFAULT_SOURCE_VIEW_NAME, Tasks)
26
 
27
- _DATASETNAME = "su_id_asr"
28
- _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
29
- _UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
30
-
31
- _LANGUAGES = ["sun"]
32
- _LOCAL = False
33
  _CITATION = """\
34
- @inproceedings{sodimana18_sltu,
35
- author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
36
- title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
37
- year=2018,
38
- booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
39
- pages={66--70},
40
- doi={10.21437/SLTU.2018-14}
41
- }
 
 
42
  """
43
 
 
 
44
  _DESCRIPTION = """\
45
- Sundanese ASR training data set containing ~220K utterances.
46
- This dataset was collected by Google in Indonesia.
 
 
47
  """
48
 
49
- _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
 
 
50
 
51
- _LICENSE = "Attribution-ShareAlike 4.0 International."
52
 
53
  _URLs = {
54
  "su_id_asr_train": "https://drive.google.com/uc?export=download&id=10YBMnKSfZQKCuYGXAsTeTfUM5t3rGLs-",
@@ -56,26 +58,31 @@ _URLs = {
56
  "su_id_asr_test": "https://drive.google.com/uc?export=download&id=1P6mtQJoZ2QV7AC9zbR2nDbW6s6YrJ_XU",
57
  }
58
 
59
- _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
60
 
61
  _SOURCE_VERSION = "1.0.0"
 
62
  _SEACROWD_VERSION = "2024.06.20"
63
 
 
64
  class SuIdASR(datasets.GeneratorBasedBuilder):
65
- """su_id contains ~220K utterances for Sundanese ASR training data."""
 
 
 
66
 
67
  BUILDER_CONFIGS = [
68
  SEACrowdConfig(
69
  name="su_id_asr_source",
70
- version=datasets.Version(_SOURCE_VERSION),
71
- description="SU_ID_ASR source schema",
72
  schema="source",
73
  subset_id="su_id_asr",
74
  ),
75
  SEACrowdConfig(
76
  name="su_id_asr_seacrowd_sptext",
77
- version=datasets.Version(_SEACROWD_VERSION),
78
- description="SU_ID_ASR Nusantara schema",
79
  schema="seacrowd_sptext",
80
  subset_id="su_id_asr",
81
  ),
@@ -83,7 +90,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
83
 
84
  DEFAULT_CONFIG_NAME = "su_id_asr_source"
85
 
86
- def _info(self):
87
  if self.config.schema == "source":
88
  features = datasets.Features(
89
  {
@@ -103,7 +110,6 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
103
  homepage=_HOMEPAGE,
104
  license=_LICENSE,
105
  citation=_CITATION,
106
- # task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
107
  )
108
 
109
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
@@ -114,7 +120,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
114
  ),
115
  datasets.SplitGenerator(
116
  name=datasets.Split.VALIDATION,
117
- gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_val"])}, # Fix here
118
  ),
119
  datasets.SplitGenerator(
120
  name=datasets.Split.TEST,
 
15
 
16
  import csv
17
  import os
18
+ from pathlib import Path
19
+ from typing import List
20
 
21
  import datasets
22
 
23
  from seacrowd.utils import schemas
24
  from seacrowd.utils.configs import SEACrowdConfig
25
+ from seacrowd.utils.constants import Tasks
 
26
 
 
 
 
 
 
 
27
  _CITATION = """\
28
+ @inproceedings{kjartansson-etal-sltu2018,
29
+ title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
30
+ author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
31
+ booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
32
+ year = {2018},
33
+ address = {Gurugram, India},
34
+ month = aug,
35
+ pages = {52--55},
36
+ URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
37
+ }
38
  """
39
 
40
+ _DATASETNAME = "su_id_asr"
41
+
42
  _DESCRIPTION = """\
43
+ This data set contains transcribed audio data for Sundanese. The data set consists of wave files, and a TSV file.
44
+ The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
45
+ The data set has been manually quality checked, but there might still be errors.
46
+ This dataset was collected by Google in collaboration with Universitas Pendidikan Indonesia in Indonesia.
47
  """
48
 
49
+ _HOMEPAGE = "http://openslr.org/36/"
50
+ _LANGUAGES = ["sun"]
51
+ _LOCAL = False
52
 
53
+ _LICENSE = "Attribution-ShareAlike 4.0 International"
54
 
55
  _URLs = {
56
  "su_id_asr_train": "https://drive.google.com/uc?export=download&id=10YBMnKSfZQKCuYGXAsTeTfUM5t3rGLs-",
 
58
  "su_id_asr_test": "https://drive.google.com/uc?export=download&id=1P6mtQJoZ2QV7AC9zbR2nDbW6s6YrJ_XU",
59
  }
60
 
61
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION] # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
62
 
63
  _SOURCE_VERSION = "1.0.0"
64
+
65
  _SEACROWD_VERSION = "2024.06.20"
66
 
67
+
68
  class SuIdASR(datasets.GeneratorBasedBuilder):
69
+ """Sundanese ASR training data set containing ~185K utterances."""
70
+
71
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
72
+ SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
73
 
74
  BUILDER_CONFIGS = [
75
  SEACrowdConfig(
76
  name="su_id_asr_source",
77
+ version=SOURCE_VERSION,
78
+ description="su_id_asr source schema",
79
  schema="source",
80
  subset_id="su_id_asr",
81
  ),
82
  SEACrowdConfig(
83
  name="su_id_asr_seacrowd_sptext",
84
+ version=SEACROWD_VERSION,
85
+ description="su_id_asr Nusantara schema",
86
  schema="seacrowd_sptext",
87
  subset_id="su_id_asr",
88
  ),
 
90
 
91
  DEFAULT_CONFIG_NAME = "su_id_asr_source"
92
 
93
+ def _info(self) -> datasets.DatasetInfo:
94
  if self.config.schema == "source":
95
  features = datasets.Features(
96
  {
 
110
  homepage=_HOMEPAGE,
111
  license=_LICENSE,
112
  citation=_CITATION,
 
113
  )
114
 
115
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
 
120
  ),
121
  datasets.SplitGenerator(
122
  name=datasets.Split.VALIDATION,
123
+ gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_dev"])},
124
  ),
125
  datasets.SplitGenerator(
126
  name=datasets.Split.TEST,