Bagas Shalahuddin Wahid committed on
Commit
47bde8b
·
1 Parent(s): 60b13db
Files changed (4) hide show
  1. README.md +9 -0
  2. __init__.py +0 -0
  3. jv_id_asr_split.py +194 -0
  4. requirements.txt +2 -0
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - jav
4
+ pretty_name: Jv Id Asr
5
+ task_categories:
6
+ - automatic-speech-recognition
7
+ tags:
8
+ - speech-recognition
9
+ ---
__init__.py ADDED
File without changes
jv_id_asr_split.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import csv
17
+ import os
18
+ from pathlib import Path
19
+ from typing import List
20
+ import gdown
21
+ import tempfile
22
+
23
+ import datasets
24
+
25
+ from seacrowd.utils import schemas
26
+ from seacrowd.utils.configs import SEACrowdConfig
27
+ from seacrowd.utils.constants import Tasks
28
+
29
# BibTeX entry for the upstream corpus paper (SLTU 2018).
_CITATION = """\
@inproceedings{kjartansson-etal-sltu2018,
    title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
    author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
    year = {2018},
    address = {Gurugram, India},
    month = aug,
    pages = {52--55},
    URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
}
"""

# Canonical dataset identifier; also the prefix of every config name below.
_DATASETNAME = "jv_id_asr"

_DESCRIPTION = """\
This data set contains transcribed audio data for Javanese. The data set consists of wave files, and a TSV file.
The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
The data set has been manually quality checked, but there might still be errors.
This dataset was collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
"""

_HOMEPAGE = "http://openslr.org/35/"
# ISO 639-3 code(s) covered by this dataset.
_LANGUAGES = ["jav"]
# False: the data is downloaded automatically, not supplied by the user.
_LOCAL = False

_LICENSE = "Attribution-ShareAlike 4.0 International"

# Google Drive share links for the three pre-split archives.
# NOTE(review): these are "file/d/<id>/view" share URLs, not direct-download
# links; the download helpers below convert/resolve them.
_URLs = {
    "jv_id_asr_train": "https://drive.google.com/file/d/1g1EsPWA1sibY5HMQlhhUVeLHTFLBevbH/view?usp=sharing",
    "jv_id_asr_dev": "https://drive.google.com/file/d/1rrpCE2VZqFSjPu2diNnwzMmX2EG346dN/view?usp=sharing",
    "jv_id_asr_test": "https://drive.google.com/file/d/1qqjKhEqWiTOQP5XbMeIVkef_aPi3ZNug/view?usp=sharing",
}

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

# Version of the raw upstream corpus.
_SOURCE_VERSION = "1.0.0"

# Version of the SEACrowd packaging of this corpus.
_SEACROWD_VERSION = "2024.06.20"
68
+
69
def download_from_gdrive(url, output_dir):
    """Download a Google Drive share link into *output_dir*.

    Args:
        url: A Google Drive ``.../file/d/<FILE_ID>/view`` sharing URL.
        output_dir: Existing directory the archive is written into.

    Returns:
        Path of the downloaded ``<FILE_ID>.zip`` file.

    Raises:
        RuntimeError: If gdown could not download the file (gdown signals
            failure by returning ``None`` rather than raising).
    """
    # Extract FILE_ID from the share URL and build the direct-download form.
    file_id = url.split("/d/")[-1].split("/")[0]
    gdrive_url = f"https://drive.google.com/uc?id={file_id}"
    output_path = os.path.join(output_dir, f"{file_id}.zip")
    # gdown.download returns the output path on success and None on failure
    # (permission denied, quota exceeded, bad id) -- fail loudly instead of
    # handing callers a path to a file that was never written.
    if gdown.download(gdrive_url, output_path, quiet=False) is None:
        raise RuntimeError(f"gdown failed to download {url}")
    return output_path
76
+
77
class JvIdASR(datasets.GeneratorBasedBuilder):
    """Javanese ASR training data set containing ~185K utterances."""

    # Builder versions for the two exposed schemas.
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Two configs over the same subset: the raw "source" schema and the
    # SEACrowd speech-text ("seacrowd_sptext") schema.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="jv_id_asr_source",
            version=SOURCE_VERSION,
            description="jv_id_asr source schema",
            schema="source",
            subset_id="jv_id_asr",
        ),
        SEACrowdConfig(
            name="jv_id_asr_seacrowd_sptext",
            version=SEACROWD_VERSION,
            description="jv_id_asr Nusantara schema",
            schema="seacrowd_sptext",
            subset_id="jv_id_asr",
        ),
    ]

    # Loading the dataset without an explicit config name uses the source schema.
    DEFAULT_CONFIG_NAME = "jv_id_asr_source"
102
+
103
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
104
+ def download_from_gdrive(url, name):
105
+ with tempfile.TemporaryDirectory() as temp_dir:
106
+ file_id = url.split("/d/")[-1].split("/")[0]
107
+ output_path = os.path.join(temp_dir, f"{name}.zip")
108
+ gdown.download(url, output_path, fuzzy=True)
109
+ extracted_path = dl_manager.extract(output_path)
110
+ return extracted_path
111
+
112
+ paths = {
113
+ "train": download_from_gdrive(_URLs["jv_id_asr_train"], 'asr_javanese_train'),
114
+ "dev": download_from_gdrive(_URLs["jv_id_asr_dev"], 'asr_javanese_dev'),
115
+ "test": download_from_gdrive(_URLs["jv_id_asr_test"], 'asr_javanese_test'),
116
+ }
117
+
118
+ return [
119
+ datasets.SplitGenerator(
120
+ name="phase_1_train",
121
+ gen_kwargs={"filepath": paths["train"], "split": "phase_1_train"},
122
+ ),
123
+ datasets.SplitGenerator(
124
+ name="phase_2_train",
125
+ gen_kwargs={"filepath": paths["train"], "split": "phase_2_train"},
126
+ ),
127
+ datasets.SplitGenerator(
128
+ name="phase_1_val",
129
+ gen_kwargs={"filepath": paths["dev"], "split": "phase_1_val"},
130
+ ),
131
+ datasets.SplitGenerator(
132
+ name="phase_2_val",
133
+ gen_kwargs={"filepath": paths["dev"], "split": "phase_2_val"},
134
+ ),
135
+ datasets.SplitGenerator(
136
+ name=datasets.Split.TEST,
137
+ gen_kwargs={"filepath": paths["test"], "split": None},
138
+ ),
139
+ ]
140
+
141
+ def _info(self) -> datasets.DatasetInfo:
142
+ if self.config.schema == "source":
143
+ features = datasets.Features(
144
+ {
145
+ "id": datasets.Value("string"),
146
+ "speaker_id": datasets.Value("string"),
147
+ "path": datasets.Value("string"),
148
+ "audio": datasets.Audio(sampling_rate=16_000),
149
+ "text": datasets.Value("string"),
150
+ "language": datasets.Value("string"),
151
+ }
152
+ )
153
+ elif self.config.schema == "seacrowd_sptext":
154
+ features = schemas.speech_text_features.copy()
155
+ features["language"] = datasets.Value("string")
156
+
157
+ return datasets.DatasetInfo(
158
+ description=_DESCRIPTION,
159
+ features=features,
160
+ homepage=_HOMEPAGE,
161
+ license=_LICENSE,
162
+ citation=_CITATION,
163
+ )
164
+
165
+ def _generate_examples(self, filepath: str, split=None):
166
+ tsv_path = os.path.join(filepath, "asr_javanese", "utt_spk_text.tsv")
167
+ with open(tsv_path, "r") as f:
168
+ reader = list(csv.reader(f, delimiter="\t"))
169
+ total = len(reader)
170
+
171
+ if split == "phase_1_train" or split == "phase_1_val":
172
+ reader = reader[: total // 2]
173
+ elif split == "phase_2_train" or split == "phase_2_val":
174
+ reader = reader[total // 2 :]
175
+
176
+ for line in reader:
177
+ audio_id, sp_id, text = line[0], line[1], line[2]
178
+ wav_path = os.path.join(filepath, "asr_javanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
179
+
180
+ if os.path.exists(wav_path):
181
+ ex = {
182
+ "id": audio_id,
183
+ "speaker_id": sp_id,
184
+ "path": wav_path,
185
+ "audio": wav_path,
186
+ "text": text,
187
+ "language": "jv",
188
+ }
189
+
190
+ if self.config.schema == "seacrowd_sptext":
191
+ ex["metadata"] = {"speaker_age": None, "speaker_gender": None}
192
+
193
+ yield audio_id, ex
194
+ f.close()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ seacrowd>=0.2.0
2
+ gdown