jineeuslab committed
Commit 23bc620 · verified · 1 Parent(s): 26c590f

Update asr_sundanese_2_hub.py

Files changed (1)
  1. asr_sundanese_2_hub.py +169 -163
asr_sundanese_2_hub.py CHANGED
@@ -1,164 +1,170 @@
- # coding=utf-8
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import csv
- import os
- from pathlib import Path
- from typing import List
-
- import datasets
-
- from seacrowd.utils import schemas
- from seacrowd.utils.configs import SEACrowdConfig
- from seacrowd.utils.constants import Tasks
-
- _CITATION = """\
- @inproceedings{kjartansson-etal-sltu2018,
-     title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
-     author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
-     booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
-     year = {2018},
-     address = {Gurugram, India},
-     month = aug,
-     pages = {52--55},
-     URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
- }
- """
-
- _DATASETNAME = "su_id_asr"
-
- _DESCRIPTION = """\
- This data set contains transcribed audio data for Sundanese, specifically asr_sundanese_2 dataset from OpenSLR.
- The data set consists of wave files, and a TSV file.
- The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
- The data set has been manually quality checked, but there might still be errors.
- This dataset was collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
- """
-
- _HOMEPAGE = "http://openslr.org/36/"
- _LANGUAGES = ["sun"]
- _LOCAL = False
-
- _LICENSE = "Attribution-ShareAlike 4.0 International"
-
- _URLs = {
-     "su_id_asr_train": "https://drive.google.com/uc?export=download&id=10YBMnKSfZQKCuYGXAsTeTfUM5t3rGLs-",
-     "su_id_asr_val": "https://drive.google.com/uc?export=download&id=1GvKjmV9ETd3wAFS5mcwA6hL0l4nwT-Xk",
-     "su_id_asr_test": "https://drive.google.com/uc?export=download&id=1P6mtQJoZ2QV7AC9zbR2nDbW6s6YrJ_XU",
- }
-
- _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION] # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
-
- _SOURCE_VERSION = "1.0.0"
-
- _SEACROWD_VERSION = "2024.06.20"
-
-
- class SuIdASR(datasets.GeneratorBasedBuilder):
-     """Sundanese ASR training data set."""
-
-     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
-
-     BUILDER_CONFIGS = [
-         SEACrowdConfig(
-             name="su_id_asr_source",
-             version=SOURCE_VERSION,
-             description="su_id_asr source schema",
-             schema="source",
-             subset_id="su_id_asr",
-         ),
-         SEACrowdConfig(
-             name="su_id_asr_seacrowd_sptext",
-             version=SEACROWD_VERSION,
-             description="su_id_asr Nusantara schema",
-             schema="seacrowd_sptext",
-             subset_id="su_id_asr",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "su_id_asr_source"
-
-     def _info(self) -> datasets.DatasetInfo:
-         if self.config.schema == "source":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "speaker_id": datasets.Value("string"),
-                     "path": datasets.Value("string"),
-                     "audio": datasets.Audio(sampling_rate=16_000),
-                     "text": datasets.Value("string"),
-                 }
-             )
-         elif self.config.schema == "seacrowd_sptext":
-             features = schemas.speech_text_features
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_train"])},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_val"])},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_test"])},
-             )
-         ]
-
-     def _generate_examples(self, filepath: Path):
-         for key, fp in filepath.items():
-             tsv_file = os.path.join(fp, "utt_spk_text.tsv")
-             with open(tsv_file, "r") as f:
-                 tsv_file = csv.reader(f, delimiter="\t")
-                 for line in tsv_file:
-                     audio_id, sp_id, text = line[0], line[1], line[2]
-                     wav_path = os.path.join(fp, "data", "{}.flac".format(audio_id))
-
-                     if os.path.exists(wav_path):
-                         if self.config.schema == "source":
-                             ex = {
-                                 "id": audio_id,
-                                 "speaker_id": sp_id,
-                                 "path": wav_path,
-                                 "audio": wav_path,
-                                 "text": text,
-                             }
-                             yield audio_id, ex
-                         elif self.config.schema == "seacrowd_sptext":
-                             ex = {
-                                 "id": audio_id,
-                                 "speaker_id": sp_id,
-                                 "path": wav_path,
-                                 "audio": wav_path,
-                                 "text": text,
-                                 "metadata": {
-                                     "speaker_age": None,
-                                     "speaker_gender": None,
-                                 },
-                             }
-                             yield audio_id, ex
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import csv
+ import os
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{kjartansson-etal-sltu2018,
+     title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
+     author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
+     booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
+     year = {2018},
+     address = {Gurugram, India},
+     month = aug,
+     pages = {52--55},
+     URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
+ }
+ """
+
+ _DATASETNAME = "su_id_asr"
+
+ _DESCRIPTION = """\
+ This data set contains transcribed audio data for Sundanese, specifically asr_sundanese_2 dataset from OpenSLR.
+ The data set consists of wave files, and a TSV file.
+ The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
+ The data set has been manually quality checked, but there might still be errors.
+ This dataset was collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
+ """
+
+ _HOMEPAGE = "http://openslr.org/36/"
+ _LANGUAGES = ["sun"]
+ _LOCAL = False
+
+ _LICENSE = "Attribution-ShareAlike 4.0 International"
+
+ _URLs = {
+     "su_id_asr_train": "https://drive.google.com/uc?export=download&id=10YBMnKSfZQKCuYGXAsTeTfUM5t3rGLs-",
+     "su_id_asr_val": "https://drive.google.com/uc?export=download&id=1GvKjmV9ETd3wAFS5mcwA6hL0l4nwT-Xk",
+     "su_id_asr_test": "https://drive.google.com/uc?export=download&id=1P6mtQJoZ2QV7AC9zbR2nDbW6s6YrJ_XU",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION] # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class SuIdASR(datasets.GeneratorBasedBuilder):
+     """Sundanese ASR training data set."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name="su_id_asr_source",
+             version=SOURCE_VERSION,
+             description="su_id_asr source schema",
+             schema="source",
+             subset_id="su_id_asr",
+         ),
+         SEACrowdConfig(
+             name="su_id_asr_seacrowd_sptext",
+             version=SEACROWD_VERSION,
+             description="su_id_asr Nusantara schema",
+             schema="seacrowd_sptext",
+             subset_id="su_id_asr",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "su_id_asr_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_sptext":
+             features = schemas.speech_text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # As the data is already split manually, we only need to download and pass the paths
+         train_path = dl_manager.download_and_extract(_URLs["su_id_asr_train"])
+         val_path = dl_manager.download_and_extract(_URLs["su_id_asr_val"])
+         test_path = dl_manager.download_and_extract(_URLs["su_id_asr_test"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": train_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": val_path},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": test_path},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path):
+         """Yields examples."""
+         tsv_file = os.path.join(filepath, "utt_spk_text.tsv")
+
+         with open(tsv_file, "r", encoding="utf-8") as f:
+             tsv_reader = csv.reader(f, delimiter="\t")
+             for line in tsv_reader:
+                 audio_id, sp_id, text = line[0], line[1], line[2]
+                 wav_path = os.path.join(filepath, "data", f"{audio_id}.flac")
+
+                 if os.path.exists(wav_path):
+                     if self.config.schema == "source":
+                         ex = {
+                             "id": audio_id,
+                             "speaker_id": sp_id,
+                             "path": wav_path,
+                             "audio": wav_path,
+                             "text": text,
+                         }
+                     elif self.config.schema == "seacrowd_sptext":
+                         ex = {
+                             "id": audio_id,
+                             "speaker_id": sp_id,
+                             "path": wav_path,
+                             "audio": wav_path,
+                             "text": text,
+                             "metadata": {
+                                 "speaker_age": None,
+                                 "speaker_gender": None,
+                             },
+                         }
+                     yield audio_id, ex
  f.close()
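
For reference, a minimal usage sketch of the updated loading script (not part of this commit). It assumes a `datasets` release that still supports script-based loaders, `soundfile` installed for audio decoding, and that the Google Drive URLs above are still reachable; the script path below is hypothetical and should point at a local copy of this file.

from datasets import load_dataset

# "su_id_asr_source" is the DEFAULT_CONFIG_NAME declared in the script above.
dset = load_dataset(
    "asr_sundanese_2_hub.py",    # local path to the loading script (assumed location)
    name="su_id_asr_source",
    trust_remote_code=True,      # needed on recent `datasets` versions that still allow scripts
)

sample = dset["train"][0]
print(sample["id"], sample["speaker_id"])
print(sample["text"])
print(sample["audio"]["sampling_rate"])  # 16_000, per the Audio feature in _info()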