cahya committed on
Commit
739029a
·
1 Parent(s): 5da672b
Files changed (3) hide show
  1. release_stats.py +8 -3
  2. test01.py +29 -47
  3. usage.py +1 -1
release_stats.py CHANGED
@@ -1,7 +1,12 @@
1
  STATS = {
2
  "name": "Librivox-Indonesia",
3
  "bundleURLTemplate": "https://huggingface.co/datasets/cahya/test01/resolve/main/audio.tgz",
4
- "version": "1.0",
5
  "date": "",
6
- "locales": {'reportedSentences': 261, 'buckets': {'dev': 3218, 'invalidated': 2454, 'other': 22787, 'reported': 260, 'test': 3622, 'train': 5043, 'validated': 23132}, 'duration': 196639788, 'clips': 48373, 'splits': {'accent': {'': 1}, 'age': {'': 0.26, 'twenties': 0.39, 'thirties': 0.07, 'teens': 0.26, 'fifties': 0, 'fourties': 0.02}, 'gender': {'': 0.26, 'male': 0.41, 'female': 0.29, 'other': 0.04}}, 'users': 416, 'size': 1253048208, 'checksum': '874e959e2ca1aacc502ff969a3e54de792dd41e4f672ae1fd9d38213f4bf4139', 'avgDurationSecs': 4.065, 'validDurationSecs': 94033.274, 'totalHrs': 54.62, 'validHrs': 26.12}
7
- }
 
 
 
 
 
 
1
  STATS = {
2
  "name": "Librivox-Indonesia",
3
  "bundleURLTemplate": "https://huggingface.co/datasets/cahya/test01/resolve/main/audio.tgz",
4
+ "version": "1.0.0",
5
  "date": "",
6
+ "locales": {
7
+ "id": {'reportedSentences': 261, 'buckets': {'dev': 3218, 'invalidated': 2454, 'other': 22787, 'reported': 260, 'test': 3622, 'train': 5043, 'validated': 23132}, 'duration': 196639788, 'clips': 48373, 'splits': {'accent': {'': 1}, 'age': {'': 0.26, 'twenties': 0.39, 'thirties': 0.07, 'teens': 0.26, 'fifties': 0, 'fourties': 0.02}, 'gender': {'': 0.26, 'male': 0.41, 'female': 0.29, 'other': 0.04}}, 'users': 416, 'size': 1253048208, 'checksum': '874e959e2ca1aacc502ff969a3e54de792dd41e4f672ae1fd9d38213f4bf4139', 'avgDurationSecs': 4.065, 'validDurationSecs': 94033.274, 'totalHrs': 54.62, 'validHrs': 26.12},
8
+ "sun": {'reportedSentences': 261, 'buckets': {'dev': 3218, 'invalidated': 2454, 'other': 22787, 'reported': 260, 'test': 3622, 'train': 5043, 'validated': 23132}, 'duration': 196639788, 'clips': 48373, 'splits': {'accent': {'': 1}, 'age': {'': 0.26, 'twenties': 0.39, 'thirties': 0.07, 'teens': 0.26, 'fifties': 0, 'fourties': 0.02}, 'gender': {'': 0.26, 'male': 0.41, 'female': 0.29, 'other': 0.04}}, 'users': 416, 'size': 1253048208, 'checksum': '874e959e2ca1aacc502ff969a3e54de792dd41e4f672ae1fd9d38213f4bf4139', 'avgDurationSecs': 4.065, 'validDurationSecs': 94033.274, 'totalHrs': 54.62, 'validHrs': 26.12}
9
+ },
10
+ 'totalDuration': 72782088097, 'totalValidDurationSecs': 53904443, 'totalHrs': 20217, 'totalValidHrs': 14973
11
+ }
12
+
test01.py CHANGED
@@ -30,7 +30,7 @@ from .release_stats import STATS
30
  _CITATION = """\
31
  """
32
 
33
- _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
34
 
35
  _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
36
 
@@ -49,10 +49,8 @@ class Test01Config(datasets.BuilderConfig):
49
  self.size_bytes = kwargs.pop("size_bytes", None)
50
  self.size_human = size_str(self.size_bytes)
51
  description = (
52
- f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
53
- f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
54
- f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
55
- f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
56
  )
57
  super(Test01Config, self).__init__(
58
  name=name,
@@ -91,17 +89,11 @@ class Test01(datasets.GeneratorBasedBuilder):
91
  )
92
  features = datasets.Features(
93
  {
94
- "client_id": datasets.Value("string"),
95
  "path": datasets.Value("string"),
96
- "audio": datasets.features.Audio(sampling_rate=48_000),
 
97
  "sentence": datasets.Value("string"),
98
- "up_votes": datasets.Value("int64"),
99
- "down_votes": datasets.Value("int64"),
100
- "age": datasets.Value("string"),
101
- "gender": datasets.Value("string"),
102
- "accent": datasets.Value("string"),
103
- "locale": datasets.Value("string"),
104
- "segment": datasets.Value("string"),
105
  }
106
  )
107
 
@@ -120,11 +112,13 @@ class Test01(datasets.GeneratorBasedBuilder):
120
 
121
  def _split_generators(self, dl_manager):
122
  """Returns SplitGenerators."""
 
123
  hf_auth_token = dl_manager.download_config.use_auth_token
124
  if hf_auth_token is None:
125
  raise ConnectionError(
126
  "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
127
  )
 
128
 
129
  bundle_url_template = STATS["bundleURLTemplate"]
130
  bundle_version = bundle_url_template.split("/")[0]
@@ -145,7 +139,7 @@ class Test01(datasets.GeneratorBasedBuilder):
145
  gen_kwargs={
146
  "local_extracted_archive": local_extracted_archive,
147
  "archive_iterator": dl_manager.iter_archive(archive_path),
148
- "metadata_filepath": "/".join([path_to_data, "train.tsv"]) if path_to_data else "train.tsv",
149
  "path_to_clips": path_to_clips,
150
  },
151
  ),
@@ -161,36 +155,24 @@ class Test01(datasets.GeneratorBasedBuilder):
161
  """Yields examples."""
162
  data_fields = list(self._info().features.keys())
163
  metadata = {}
164
- metadata_found = False
 
 
 
 
 
 
 
 
 
 
165
  for path, f in archive_iterator:
166
- if path == metadata_filepath:
167
- metadata_found = True
168
- lines = (line.decode("utf-8") for line in f)
169
- reader = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
170
- for row in reader:
171
- # set absolute path for mp3 audio file
172
- if not row["path"].endswith(".mp3"):
173
- row["path"] += ".mp3"
174
- row["path"] = os.path.join(path_to_clips, row["path"])
175
- # accent -> accents in CV 8.0
176
- if "accents" in row:
177
- row["accent"] = row["accents"]
178
- del row["accents"]
179
- # if data is incomplete, fill with empty values
180
- for field in data_fields:
181
- if field not in row:
182
- row[field] = ""
183
- metadata[row["path"]] = row
184
- elif path.startswith(path_to_clips):
185
- assert metadata_found, "Found audio clips before the metadata TSV file."
186
- if not metadata:
187
- break
188
- if path in metadata:
189
- result = dict(metadata[path])
190
- # set the audio feature and the path to the extracted file
191
- path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
192
- result["audio"] = {"path": path, "bytes": f.read()}
193
- # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
194
- result["path"] = path if local_extracted_archive else None
195
-
196
- yield path, result
 
30
  _CITATION = """\
31
  """
32
 
33
+ _HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
34
 
35
  _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
36
 
 
49
  self.size_bytes = kwargs.pop("size_bytes", None)
50
  self.size_human = size_str(self.size_bytes)
51
  description = (
52
+ f"LibriVox-Indonesia speech to text dataset in {self.language} released on {self.release_date}. "
53
+ f"The dataset comprises {self.validated_hr} hours of transcribed speech data"
 
 
54
  )
55
  super(Test01Config, self).__init__(
56
  name=name,
 
89
  )
90
  features = datasets.Features(
91
  {
 
92
  "path": datasets.Value("string"),
93
+ "language": datasets.Value("string"),
94
+ "reader": datasets.Value("string"),
95
  "sentence": datasets.Value("string"),
96
+ "audio": datasets.features.Audio(sampling_rate=48_000)
 
 
 
 
 
 
97
  }
98
  )
99
 
 
112
 
113
  def _split_generators(self, dl_manager):
114
  """Returns SplitGenerators."""
115
+ """
116
  hf_auth_token = dl_manager.download_config.use_auth_token
117
  if hf_auth_token is None:
118
  raise ConnectionError(
119
  "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
120
  )
121
+ """
122
 
123
  bundle_url_template = STATS["bundleURLTemplate"]
124
  bundle_version = bundle_url_template.split("/")[0]
 
139
  gen_kwargs={
140
  "local_extracted_archive": local_extracted_archive,
141
  "archive_iterator": dl_manager.iter_archive(archive_path),
142
+ "metadata_filepath": "/".join([path_to_data, "audio_transcription.csv"]) if path_to_data else "audio_transcription.csv",
143
  "path_to_clips": path_to_clips,
144
  },
145
  ),
 
155
  """Yields examples."""
156
  data_fields = list(self._info().features.keys())
157
  metadata = {}
158
+ filepath = local_extracted_archive + "/audio/audio_transcription.csv"
159
+ with open(filepath, "r") as f:
160
+ lines = (line for line in f)
161
+ utterances = csv.DictReader(lines)
162
+ for row in utterances:
163
+ row["path"] = os.path.join(path_to_clips, row["path"])
164
+ # if data is incomplete, fill with empty values
165
+ for field in data_fields:
166
+ if field not in row:
167
+ row[field] = ""
168
+ metadata[row["path"]] = row
169
  for path, f in archive_iterator:
170
+ if path.endswith(".mp3") and path in metadata:
171
+ result = dict(metadata[path])
172
+ # set the audio feature and the path to the extracted file
173
+ path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
174
+ result["audio"] = {"path": path, "bytes": f.read()}
175
+ # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
176
+ result["path"] = path if local_extracted_archive else None
177
+
178
+ yield path, result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
usage.py CHANGED
@@ -2,7 +2,7 @@ from datasets import load_dataset
2
 
3
 
4
  def main():
5
- ds = load_dataset("./test01.py", "sun")
6
  print(ds)
7
 
8
 
 
2
 
3
 
4
  def main():
5
+ ds = load_dataset("./test01.py", "sun", ignore_verifications=True)
6
  print(ds)
7
 
8