leviethoang committed on
Commit
05a39e1
·
1 Parent(s): c32ddf2

Update VBVLSP.py

Browse files
Files changed (1) hide show
  1. VBVLSP.py +69 -150
VBVLSP.py CHANGED
@@ -1,5 +1,5 @@
1
  # coding=utf-8
2
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
@@ -12,160 +12,79 @@
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
- """ VinDataVLSP Dataset"""
16
 
17
 
 
 
 
 
18
  import datasets
19
- from datasets.tasks import AutomaticSpeechRecognition
20
- import pandas as pd
21
- import re
22
-
23
-
24
- _DATA_URL = "https://husteduvn-my.sharepoint.com/:u:/g/personal/hoang_lv194767_sis_hust_edu_vn/EYhNns0j8GJEgZvb-G2aRS4Bt7AEdQMrGxYtyO2xjc6Img?e=gIpjeY&download=1"
25
- _PROMPTS_URLS = {
26
- "train": "https://drive.google.com/uc?export=download&id=1qqvPo2tOco8_iDU2eAl-WuVivr2GJTS8",
27
- "test": "https://drive.google.com/uc?export=download&id=1o7-ugckc1rKXfhLQHLKUeUaydOgJEEkO",
28
- "validation": "https://drive.google.com/uc?export=download&id=1eTwNzCVWhQBf3MDLgS8coZ_mySbp67BM"
29
- }
30
-
31
- _DESCRIPTION = """\
32
- """
33
-
34
- _LANGUAGES = {
35
- "vi": {
36
- "Language": "Vietnamese",
37
- "Date": "2021-12-11",
38
- "Size": "11 GB",
39
- "Version": "vi_100h_2021-12-11",
40
- },
41
- }
42
-
43
-
44
class VinDataVLSPConfig(datasets.BuilderConfig):
    """BuilderConfig for VinDataVLSP."""

    def __init__(self, name, sub_version, **kwargs):
        """
        Args:
            name: `string`, name of the configuration (the language id).
            sub_version: `string`, snapshot/version string of the dataset.
            **kwargs: keyword arguments forwarded to super. The optional keys
                "language", "date", "size", "val_hrs", "total_hrs" and
                "num_of_voice" are popped and stored as attributes.
        """
        self.sub_version = sub_version
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        self.size = kwargs.pop("size", None)
        self.validated_hr_total = kwargs.pop("val_hrs", None)
        self.total_hr_total = kwargs.pop("total_hrs", None)
        self.num_of_voice = kwargs.pop("num_of_voice", None)
        # FIX: zero-argument super() instead of Py2-style super(Cls, self);
        # docstring corrected (it was copy-pasted from CommonVoice).
        super().__init__(
            name=name, version=datasets.Version("0.1.0", ""), description="", **kwargs
        )
67
-
68
-
69
class VinDataVLSP(datasets.GeneratorBasedBuilder):
    """Dataset builder for the VinData VLSP Vietnamese speech corpus."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        VinDataVLSPConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            sub_version=_LANGUAGES[lang_id]["Version"],
        )
        for lang_id in _LANGUAGES.keys()
    ]

    def _info(self):
        # Each example is an audio file plus its transcript; "file_path" also
        # serves as the example key.
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                "script": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            task_templates=[
                AutomaticSpeechRecognition(audio_file_path_column="file_path", transcription_column="script")
            ],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for train/test/validation."""
        tsv_files = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # FIX: removed leftover debug prints and a throwaway pandas read of the
        # train TSV (it downloaded and parsed the file only to print its length).
        # NOTE(review): `iter_archive` yields member paths without a leading
        # "./"; the original "./vlsp2020_train_set_02" prefix would make the
        # startswith()/dict lookups in _generate_examples never match — confirm
        # against the actual archive layout.
        path_to_clips = "vlsp2020_train_set_02"
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "tsv_files": tsv_files[key],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "validation"),
            )
        ]

    def _generate_examples(self, tsv_files, audio_files, path_to_clips):
        """Yields (archive_path, example) pairs joining transcripts with audio.

        Args:
            tsv_files: local path of one split's transcript TSV.
            audio_files: iterator of (member_path, file_obj) over the tar archive.
            path_to_clips: directory prefix of audio members inside the archive.
        """
        data_fields = list(self._info().features.keys())
        # "audio" is not a column of the TSV files.
        data_fields.remove("audio")
        examples = {}

        df = pd.read_csv(tsv_files, sep="\t", header=0).dropna()
        chars_to_ignore_regex = r'[,?.!\-;:"“%\'�]'

        for file_path, script in zip(df["file_path"], df["script"]):
            # Full member path of the audio file inside the tar archive.
            audio_path = path_to_clips + "/" + file_path
            # Drop any "speaker:"-style prefix before the first colon.
            if ":" in script:
                two_dot_index = script.index(":")
                script = script[two_dot_index + 1:]
            script = script.replace("\n", " ")
            script = re.sub(chars_to_ignore_regex, '', script).lower()

            examples[audio_path] = {
                "file_path": audio_path,
                "script": script,
            }

        for path, f in audio_files:
            if path.startswith(path_to_clips) and path in examples:
                audio = {"path": path, "bytes": f.read()}
                yield path, {**examples[path], "audio": audio}
 
1
  # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
 
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
+ """ Common Voice Dataset"""
16
 
17
 
18
+ import csv
19
+ import os
20
+ import json
21
+
22
  import datasets
23
+ from datasets.utils.py_utils import size_str
24
+ from tqdm import tqdm
25
+
26
+
27
+ # TODO: change "streaming" to "main" after merge!
28
+ _BASE_URL = "https://huggingface.co/datasets/leviethoang/VBVLSP/resolve/main"
29
+
30
+ _AUDIO_URL = _BASE_URL + "audio/vlsp2020_train_set_02.tar.gz"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{split}.tsv"
33
+
34
+
35
+ class CommonVoice(datasets.GeneratorBasedBuilder):
36
  DEFAULT_WRITER_BATCH_SIZE = 1000
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
  def _split_generators(self, dl_manager):
39
+ splits = ("train", , "test", "validation")
40
+ audio_url = _AUDIO_URL
41
+ archive_path = dl_manager.download(audio_url)
42
+ local_extracted_archive_path = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
43
+
44
+ meta_urls = {split: _TRANSCRIPT_URL.format(split=split) for split in splits}
45
+ meta_paths = dl_manager.download_and_extract(meta_urls)
46
+
47
+ split_generators = []
48
+ split_names = {
49
+ "train": datasets.Split.TRAIN,
50
+ "validation": datasets.Split.VALIDATION,
51
+ "test": datasets.Split.TEST,
52
+ }
53
+ for split in splits:
54
+ split_generators.append(
55
+ datasets.SplitGenerator(
56
+ name=split_names.get(split, split),
57
+ gen_kwargs={
58
+ "local_extracted_archive_paths": local_extracted_archive_path,
59
+ "archives": dl_manager.iter_archive(archive_path),
60
+ "meta_path": meta_paths[split],
61
+ },
62
+ ),
63
+ )
64
+
65
+ return split_generators
66
+
67
+ def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
 
 
 
 
 
 
 
 
68
  data_fields = list(self._info().features.keys())
69
+ metadata = {}
70
+ with open(meta_path, encoding="utf-8") as f:
71
+ reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
72
+ for row in tqdm(reader, desc="Reading metadata..."):
73
+ # if data is incomplete, fill with empty values
74
+ for field in data_fields:
75
+ if field not in row:
76
+ row[field] = ""
77
+ metadata[row["file_path"]] = row
78
+
79
+
80
+ for filename, file in audio_archive:
81
+ _, filename = os.path.split(filename)
82
+ if filename in metadata:
83
+ result = dict(metadata[filename])
84
+ # set the audio feature and the path to the extracted file
85
+ path = os.path.join(local_extracted_archive_path, filename) if local_extracted_archive_path else filename
86
+ result["audio"] = {"file_path": path, "bytes": file.read()}
87
+ # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
88
+ result["file_path"] = path if local_extracted_archive_paths else filename
89
+
90
+ yield path, result