yangwang825 committed on
Commit
0f24f2a
·
verified ·
1 Parent(s): 2752009

Create sep28k.py

Browse files
Files changed (1) hide show
  1. sep28k.py +269 -0
sep28k.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+
3
+ """SEP-28K dataset."""
4
+
5
+ import os
6
+ import gzip
7
+ import shutil
8
+ import urllib.request
9
+ from typing import List
10
+ from pathlib import Path
11
+
12
+ import librosa
13
+ import datasets
14
+ import pandas as pd
15
+ from rich import print
16
+ from tqdm.auto import tqdm
17
+
18
+ SAMPLING_RATE = 16_000
19
+
20
+ CLASSES = ['block', 'prolongation', 'sound_rep', 'word_rep', 'interjection', 'no_dysfluencies']
21
+
22
+
23
+ class SEP28KConfig(datasets.BuilderConfig):
24
+ """BuilderConfig for SEP-28K."""
25
+
26
+ def __init__(self, features, **kwargs):
27
+ super(SEP28KConfig, self).__init__(version=datasets.Version("0.0.1", ""), **kwargs)
28
+ self.features = features
29
+
30
+
31
+ class SEP28K(datasets.GeneratorBasedBuilder):
32
+
33
+ BUILDER_CONFIGS = [
34
+ SEP28KConfig(
35
+ features=datasets.Features(
36
+ {
37
+ "audio": datasets.Audio(sampling_rate=SAMPLING_RATE),
38
+ # "speaker": datasets.Value("string"),
39
+ # "duration": datasets.Value("int32"),
40
+ "start": datasets.Value("int32"),
41
+ "end": datasets.Value("int32"),
42
+ "stutter": datasets.Sequence(datasets.Value("string")),
43
+ "label": datasets.Sequence(datasets.features.ClassLabel(names=CLASSES)),
44
+ }
45
+ ),
46
+ name="default",
47
+ description="",
48
+ ),
49
+ ]
50
+
51
+ DEFAULT_CONFIG_NAME = "default"
52
+
53
+ def _info(self):
54
+ return datasets.DatasetInfo(
55
+ description="SEP-28K dataset",
56
+ features=self.config.features,
57
+ )
58
+
59
+ def _split_generators(self, dl_manager):
60
+ if dl_manager.manual_dir is None:
61
+ from datasets.config import HF_DATASETS_CACHE
62
+
63
+ data_dir = os.path.join(HF_DATASETS_CACHE, "downloads")
64
+ print(f'`data_dir` is None, set the path to {data_dir}')
65
+ else:
66
+ data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
67
+
68
+ if not os.path.exists(os.path.join(data_dir, 'clips')):
69
+ download_file(
70
+ 'https://huggingface.co/datasets/confit/sep-28k/resolve/main/archive.zip',
71
+ dest=os.path.join(data_dir, 'archive.zip'),
72
+ unpack=True,
73
+ write_permissions=True
74
+ )
75
+
76
+ return [
77
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir}),
78
+ ]
79
+
80
+ def _generate_examples(self, data_dir):
81
+ """Generate examples from VoxCeleb"""
82
+ metadata_df = pd.read_csv(os.path.join(data_dir, 'SEP-28k_labels.csv'))
83
+ metadata_df = metadata_df[metadata_df['Unsure'] == 0].reset_index(drop=True)
84
+
85
+ threshold = 2 # https://arxiv.org/pdf/2102.12394
86
+ _mapping = {}
87
+ for idx, row in metadata_df.iterrows():
88
+ filename = f"{row['Show']}_{row['EpId']}_{row['ClipId']}"
89
+ start = row['Start']
90
+ end = row['Stop']
91
+ block = 1 if row['Block'] >= threshold else 0
92
+ prolongation = 1 if row['Prolongation'] >= threshold else 0
93
+ sound_rep = 1 if row['SoundRep'] >= threshold else 0
94
+ word_rep = 1 if row['WordRep'] >= threshold else 0
95
+ interjection = 1 if row['Interjection'] >= threshold else 0
96
+ # no_stuttered_words = 1 if row['NoStutteredWords'] >= 1 else 0
97
+ dysfluencies = sum([prolongation, block, sound_rep, word_rep, interjection])
98
+ no_dysfluencies = 1 if dysfluencies == 0 else 0
99
+
100
+ stutter = []
101
+ if block == 1:
102
+ stutter.append('block')
103
+ if prolongation == 1:
104
+ stutter.append('prolongation')
105
+ if sound_rep == 1:
106
+ stutter.append('sound_rep')
107
+ if word_rep == 1:
108
+ stutter.append('word_rep')
109
+ if interjection == 1:
110
+ stutter.append('interjection')
111
+ if no_dysfluencies == 1:
112
+ stutter.append('no_dysfluencies')
113
+
114
+ _mapping[filename] = {
115
+ 'filename': filename,
116
+ 'start': start,
117
+ 'end': end,
118
+ 'block': block,
119
+ 'prolongation': prolongation,
120
+ 'sound_rep': sound_rep,
121
+ 'word_rep': word_rep,
122
+ 'interjection': interjection,
123
+ 'no_dysfluencies': no_dysfluencies,
124
+ 'stutter': stutter,
125
+ }
126
+
127
+ # Iterating the contents of the data to extract the relevant information
128
+ extensions = ['.wav']
129
+
130
+ _, wav_paths = fast_scandir(data_dir, extensions, recursive=True)
131
+
132
+ for guid, wav_path in enumerate(wav_paths):
133
+ # duration = librosa.get_duration(path=wav_path)
134
+ # if duration <= 0:
135
+ # continue
136
+ try:
137
+ fileid = Path(wav_path).stem
138
+ info = _mapping[fileid]
139
+ yield guid, {
140
+ "id": str(guid),
141
+ "audio": wav_path,
142
+ "stutter": info['stutter'],
143
+ "label": info['stutter'],
144
+ "start": start,
145
+ "end": end,
146
+ }
147
+ except:
148
+ continue
149
+
150
+
151
+ def fast_scandir(path: str, extensions: List[str], recursive: bool = False):
152
+ # Scan files recursively faster than glob
153
+ # From github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
154
+ subfolders, files = [], []
155
+
156
+ try: # hope to avoid 'permission denied' by this try
157
+ for f in os.scandir(path):
158
+ try: # 'hope to avoid too many levels of symbolic links' error
159
+ if f.is_dir():
160
+ subfolders.append(f.path)
161
+ elif f.is_file():
162
+ if os.path.splitext(f.name)[1].lower() in extensions:
163
+ files.append(f.path)
164
+ except Exception:
165
+ pass
166
+ except Exception:
167
+ pass
168
+
169
+ if recursive:
170
+ for path in list(subfolders):
171
+ sf, f = fast_scandir(path, extensions, recursive=recursive)
172
+ subfolders.extend(sf)
173
+ files.extend(f) # type: ignore
174
+
175
+ return subfolders, files
176
+
177
+
178
+ def download_file(
179
+ source,
180
+ dest,
181
+ unpack=False,
182
+ dest_unpack=None,
183
+ replace_existing=False,
184
+ write_permissions=False,
185
+ ):
186
+ """Downloads the file from the given source and saves it in the given
187
+ destination path.
188
+ Arguments
189
+ ---------
190
+ source : path or url
191
+ Path of the source file. If the source is an URL, it downloads it from
192
+ the web.
193
+ dest : path
194
+ Destination path.
195
+ unpack : bool
196
+ If True, it unpacks the data in the dest folder.
197
+ dest_unpack: path
198
+ Path where to store the unpacked dataset
199
+ replace_existing : bool
200
+ If True, replaces the existing files.
201
+ write_permissions: bool
202
+ When set to True, all the files in the dest_unpack directory will be granted write permissions.
203
+ This option is active only when unpack=True.
204
+ """
205
+ class DownloadProgressBar(tqdm):
206
+ """DownloadProgressBar class."""
207
+
208
+ def update_to(self, b=1, bsize=1, tsize=None):
209
+ """Needed to support multigpu training."""
210
+ if tsize is not None:
211
+ self.total = tsize
212
+ self.update(b * bsize - self.n)
213
+
214
+ # Create the destination directory if it doesn't exist
215
+ dest_dir = Path(dest).resolve().parent
216
+ dest_dir.mkdir(parents=True, exist_ok=True)
217
+ if "http" not in source:
218
+ shutil.copyfile(source, dest)
219
+
220
+ elif not os.path.isfile(dest) or (
221
+ os.path.isfile(dest) and replace_existing
222
+ ):
223
+ print(f"Downloading {source} to {dest}")
224
+ with DownloadProgressBar(
225
+ unit="B",
226
+ unit_scale=True,
227
+ miniters=1,
228
+ desc=source.split("/")[-1],
229
+ ) as t:
230
+ urllib.request.urlretrieve(
231
+ source, filename=dest, reporthook=t.update_to
232
+ )
233
+ else:
234
+ print(f"{dest} exists. Skipping download")
235
+
236
+ # Unpack if necessary
237
+ if unpack:
238
+ if dest_unpack is None:
239
+ dest_unpack = os.path.dirname(dest)
240
+ print(f"Extracting {dest} to {dest_unpack}")
241
+ # shutil unpack_archive does not work with tar.gz files
242
+ if (
243
+ source.endswith(".tar.gz")
244
+ or source.endswith(".tgz")
245
+ or source.endswith(".gz")
246
+ ):
247
+ out = dest.replace(".gz", "")
248
+ with gzip.open(dest, "rb") as f_in:
249
+ with open(out, "wb") as f_out:
250
+ shutil.copyfileobj(f_in, f_out)
251
+ else:
252
+ shutil.unpack_archive(dest, dest_unpack)
253
+ if write_permissions:
254
+ set_writing_permissions(dest_unpack)
255
+
256
+
257
+ def set_writing_permissions(folder_path):
258
+ """
259
+ This function sets user writing permissions to all the files in the given folder.
260
+ Arguments
261
+ ---------
262
+ folder_path : folder
263
+ Folder whose files will be granted write permissions.
264
+ """
265
+ for root, dirs, files in os.walk(folder_path):
266
+ for file_name in files:
267
+ file_path = os.path.join(root, file_name)
268
+ # Set writing permissions (mode 0o666) to the file
269
+ os.chmod(file_path, 0o666)