calicxy committed on
Commit
102cdb8
·
1 Parent(s): 64351fd

updates to part 3 script and adding part 2 loading

Browse files
Files changed (2) hide show
  1. imda-dataset.py +112 -225
  2. imda_nsc_part3.py +348 -0
imda-dataset.py CHANGED
@@ -1,100 +1,54 @@
1
  import os
 
2
  import datasets
3
- # import pandas as pd
4
  from sklearn.model_selection import train_test_split
5
- import textgrids
6
- import io
7
- import soundfile as sf
8
- from urllib.request import urlopen
9
- import re
10
- import json
11
- import tempfile
12
-
13
- def cleanup_string(line):
14
-
15
- words_to_remove = ['(ppo)','(ppc)', '(ppb)', '(ppl)', '<s/>','<c/>','<q/>', '<fil/>', '<sta/>', '<nps/>', '<spk/>', '<non/>', '<unk>', '<s>', '<z>', '<nen>']
16
-
17
- formatted_line = re.sub(r'\s+', ' ', line).strip().lower()
18
-
19
- #detect all word that matches words in the words_to_remove list
20
- for word in words_to_remove:
21
- if re.search(word,formatted_line):
22
- # formatted_line = re.sub(word,'', formatted_line)
23
- formatted_line = formatted_line.replace(word,'')
24
- formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
25
- # print("*** removed words: " + formatted_line)
26
-
27
- #detect '\[(.*?)\].' e.g. 'Okay [ah], why did I gamble?'
28
- #remove [ ] and keep text within
29
- if re.search('\[(.*?)\]', formatted_line):
30
- formatted_line = re.sub('\[(.*?)\]', r'\1', formatted_line).strip()
31
- #print("***: " + formatted_line)
32
-
33
- #detect '\((.*?)\).' e.g. 'Okay (um), why did I gamble?'
34
- #remove ( ) and keep text within
35
- if re.search('\((.*?)\)', formatted_line):
36
- formatted_line = re.sub('\((.*?)\)', r'\1', formatted_line).strip()
37
- # print("***: " + formatted_line)
38
-
39
- #detect '\'(.*?)\'' e.g. 'not 'hot' per se'
40
- #remove ' ' and keep text within
41
- if re.search('\'(.*?)\'', formatted_line):
42
- formatted_line = re.sub('\'(.*?)\'', r'\1', formatted_line).strip()
43
- #print("***: " + formatted_line)
44
-
45
- #remove punctation '''!()-[]{};:'"\, <>./?@#$%^&*_~'''
46
- punctuation = '''!–;"\,./?@#$%^&*~'''
47
- punctuation_list = str.maketrans("","",punctuation)
48
- formatted_line = re.sub(r'-', ' ', formatted_line)
49
- formatted_line = re.sub(r'_', ' ', formatted_line)
50
- formatted_line = formatted_line.translate(punctuation_list)
51
- formatted_line = re.sub(r'\s+', ' ', formatted_line).strip().lower()
52
- #print("***: " + formatted_line)
53
-
54
- return formatted_line
55
-
56
-
57
 
58
  _DESCRIPTION = """\
59
- The National Speech Corpus (NSC) is the first large-scale Singapore English corpus
60
- spearheaded by the Info-communications and Media Development Authority (IMDA) of Singapore.
61
  """
62
 
63
  _CITATION = """\
64
  """
65
  _CHANNEL_CONFIGS = sorted([
66
- "Audio Same CloseMic", "Audio Separate IVR", "Audio Separate StandingMic"
67
  ])
68
 
69
- _HOMEPAGE = "https://www.imda.gov.sg/how-we-can-help/national-speech-corpus"
70
 
71
- _LICENSE = ""
72
 
73
- _PATH_TO_DATA = './IMDA - National Speech Corpus/PART3'
74
- # _PATH_TO_DATA = './PART1/DATA'
75
 
76
- INTERVAL_MAX_LENGTH = 25
 
 
 
77
 
78
  class Minds14Config(datasets.BuilderConfig):
79
  """BuilderConfig for xtreme-s"""
80
 
81
  def __init__(
82
- self, channel, description, homepage, path_to_data
83
  ):
84
  super(Minds14Config, self).__init__(
85
- name=channel,
86
  version=datasets.Version("1.0.0", ""),
87
  description=self.description,
88
  )
89
  self.channel = channel
 
 
90
  self.description = description
91
  self.homepage = homepage
92
  self.path_to_data = path_to_data
93
 
94
 
95
- def _build_config(channel):
96
  return Minds14Config(
97
  channel=channel,
 
 
98
  description=_DESCRIPTION,
99
  homepage=_HOMEPAGE,
100
  path_to_data=_PATH_TO_DATA,
@@ -119,21 +73,25 @@ class NewDataset(datasets.GeneratorBasedBuilder):
119
  # data = datasets.load_dataset('my_dataset', 'second_domain')
120
  BUILDER_CONFIGS = []
121
  for channel in _CHANNEL_CONFIGS + ["all"]:
122
- BUILDER_CONFIGS.append(_build_config(channel))
 
 
123
  # BUILDER_CONFIGS = [_build_config(name) for name in _CHANNEL_CONFIGS + ["all"]]
124
 
125
- DEFAULT_CONFIG_NAME = "all" # It's not mandatory to have a default configuration. Just use one if it make sense.
126
 
127
  def _info(self):
128
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
129
  task_templates = None
 
130
  features = datasets.Features(
131
  {
132
- "audio": datasets.features.Audio(),
133
  "transcript": datasets.Value("string"),
134
  "mic": datasets.Value("string"),
135
  "audio_name": datasets.Value("string"),
136
- "interval": datasets.Value("string")
 
137
  }
138
  )
139
 
@@ -163,51 +121,31 @@ class NewDataset(datasets.GeneratorBasedBuilder):
163
  else [self.config.channel]
164
  )
165
 
166
- json_path = dl_manager.download(os.path.join(self.config.path_to_data, "directory_list.json"))
167
- print(f"json_path: {json_path}")
168
- with open(json_path, "r") as f:
169
- directory_dict = json.load(f)
170
- print(f"directory_dict: {directory_dict}")
171
-
172
- train_audio_list = []
173
- test_audio_list = []
174
- for mic in mics:
175
- audio_list = []
176
- if mic == "Audio Same CloseMic":
177
- audio_list = [x for x in directory_dict[mic] if (x[-5] == "1") ]
178
- train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
179
- for path in train:
180
- train_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
181
- s = list(path)
182
- s[-5] = "2"
183
- train_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
184
- for path in test:
185
- test_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
186
- s = list(path)
187
- s[-5] = "2"
188
- test_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
189
- elif mic == "Audio Separate IVR":
190
- audio_list = [x.split("\\")[0] for x in directory_dict[mic]]
191
- train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
192
- for folder in train:
193
- audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
194
- train_audio_list.extend(audios)
195
- for folder in test:
196
- audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
197
- test_audio_list.extend(audios)
198
- elif mic == "Audio Separate StandingMic":
199
- audio_list = [x[:14] for x in directory_dict[mic]]
200
- audio_list = list(set(audio_list))
201
- train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
202
- for folder in train:
203
- audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
204
- train_audio_list.extend(audios)
205
- for folder in test:
206
- audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
207
- test_audio_list.extend(audios)
208
-
209
- print(f"train_audio_list: { train_audio_list}")
210
- print(f"test_audio_list: { test_audio_list}")
211
 
212
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
213
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
@@ -216,17 +154,23 @@ class NewDataset(datasets.GeneratorBasedBuilder):
216
  datasets.SplitGenerator(
217
  name=datasets.Split.TRAIN,
218
  gen_kwargs={
219
- # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
220
- "audio_list": train_audio_list,
221
- "dl_manager":dl_manager,
 
 
 
222
  },
223
  ),
224
  datasets.SplitGenerator(
225
  name=datasets.Split.TEST,
226
  gen_kwargs={
227
- # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
228
- "audio_list": test_audio_list,
229
- "dl_manager":dl_manager,
 
 
 
230
  },
231
  ),
232
  ]
@@ -234,112 +178,55 @@ class NewDataset(datasets.GeneratorBasedBuilder):
234
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
235
  def _generate_examples(
236
  self,
237
- audio_list,
238
- dl_manager,
 
 
 
239
  ):
240
  id_ = 0
241
- for audio_path in audio_list:
242
- file = os.path.split(audio_path)[-1]
243
- folder = os.path.split(os.path.split(audio_path)[0])[-1]
244
- # get script_path
245
- if file.split("_")[0] == "conf":
246
- # mic == "Audio Separate IVR"
247
- s = list(file)
248
- s[14] = "_"
249
- file = ''.join(s)
250
- script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
251
- elif folder.split()[1] == "Same":
252
- # mic == "Audio Same CloseMic IVR"
253
- script_path = os.path.join(self.config.path_to_data, "Scripts Same", file[:-4]+".TextGrid")
254
- elif folder.split()[1] == "Separate":
255
- # mic == "Audio Separate StandingMic":
256
- script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
257
-
258
-
259
- # LOAD TRANSCRIPT
260
- # script_path = os.path.join(self.config.path_to_data, 'Scripts Same', '3000-1.TextGrid')
261
- # check that the textgrid file can be read
262
- script_path = dl_manager.download(script_path)
263
- try:
264
- # tg = textgrid.TextGrid.fromFile(script_path)
265
- with open(script_path, "rb") as f:
266
- tg = f.read()
267
- tg_dict = textgrids.TextGrid()
268
- tg_dict.parse(tg)
269
- for key in tg_dict.keys():
270
- tg = tg_dict[key]
271
- except UnicodeDecodeError:
272
- with open(script_path, "rb") as f:
273
- tg = f.read()
274
- decoded = tg.decode('utf-16')
275
- encoded = decoded.encode('utf-8')
276
- tg_dict = textgrids.TextGrid()
277
- tg_dict.parse(encoded)
278
- for key in tg_dict.keys():
279
- tg = tg_dict[key]
280
- except TypeError:
281
- with open(script_path, "rb") as f:
282
- tg = f.read()
283
- decoded = tg.decode('utf-8-sig')
284
- encoded = decoded.encode('utf-8')
285
- tg_dict = textgrids.TextGrid()
286
- tg_dict.parse(encoded)
287
- for key in tg_dict.keys():
288
- tg = tg_dict[key]
289
-
290
- except Exception as e:
291
- print(f"error reading textgrid file, {script_path}, {str(e)}")
292
- continue
293
- # LOAD AUDIO
294
- # check that archive path exists, else will not open the archive
295
- audio_path = dl_manager.download(audio_path)
296
- if os.path.exists(audio_path):
297
- with open(audio_path, 'rb') as f:
298
- data, sr = sf.read(f)
299
- # data, sr = sf.read(audio_path)
300
- result = {}
301
- i = 0
302
- intervalLength = 0
303
- intervalStart = 0
304
- transcript_list = []
305
- # filepath = os.path.join(self.config.path_to_data, 'tmp_clip.wav')
306
- # filepath = dl_manager.download(filepath)
307
- tempWavFile = tempfile.mktemp('.wav')
308
- while i < (len(tg)-1):
309
- transcript = cleanup_string(tg[i].text)
310
- if intervalLength == 0 and len(transcript) == 0:
311
- intervalStart = tg[i].xmax
312
- i+=1
313
- continue
314
- intervalLength += tg[i].xmax-tg[i].xmin
315
- if intervalLength > INTERVAL_MAX_LENGTH:
316
- print(f"INTERVAL LONGER THAN {intervalLength}")
317
- result["transcript"] = transcript
318
- result["interval"] = "start:"+str(tg[i].xmin)+", end:"+str(tg[i].xmax)
319
- result["audio"] = {"path": audio_path, "bytes": data[int(tg[i].xmin*sr):int(tg[i].xmax*sr)], "sampling_rate":sr}
320
- yield id_, result
321
- id_+= 1
322
- intervalLength = 0
323
- else:
324
- if (intervalLength + tg[i+1].xmax-tg[i+1].xmin) < INTERVAL_MAX_LENGTH:
325
- if len(transcript) != 0:
326
- transcript_list.append(transcript)
327
- i+=1
328
- continue
329
- if len(transcript) == 0:
330
- spliced_audio = data[int(intervalStart*sr):int(tg[i].xmin*sr)]
331
- else:
332
- transcript_list.append(transcript)
333
- spliced_audio = data[int(intervalStart*sr):int(tg[i].xmax*sr)]
334
 
335
- sf.write(tempWavFile,spliced_audio, sr )
336
- # sf.write(filepath, spliced_audio, sr)
337
- result["interval"] = "start:"+str(intervalStart)+", end:"+str(tg[i].xmax)
338
- result["audio"] = {"path": tempWavFile, "bytes": spliced_audio, "sampling_rate":sr}
339
- result["transcript"] = ' '.join(transcript_list)
340
- yield id_, result
341
- id_+= 1
342
- intervalLength=0
343
- intervalStart=tg[i].xmax
344
- transcript_list = []
345
- i+=1
 
1
  import os
2
+ import glob
3
  import datasets
4
+ import pandas as pd
5
  from sklearn.model_selection import train_test_split
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  _DESCRIPTION = """\
8
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
 
9
  """
10
 
11
  _CITATION = """\
12
  """
13
  _CHANNEL_CONFIGS = sorted([
14
+ "CHANNEL0", "CHANNEL1", "CHANNEL2"
15
  ])
16
 
17
+ _GENDER_CONFIGS = sorted(["F", "M"])
18
 
19
+ _RACE_CONFIGS = sorted(["CHINESE", "MALAY", "INDIAN", "OTHERS"])
20
 
21
+ _HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
 
22
 
23
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
24
+
25
+ _PATH_TO_DATA = './IMDA - National Speech Corpus/PART1'
26
+ # _PATH_TO_DATA = './PART1/DATA'
27
 
28
  class Minds14Config(datasets.BuilderConfig):
29
  """BuilderConfig for xtreme-s"""
30
 
31
  def __init__(
32
+ self, channel, gender, race, description, homepage, path_to_data
33
  ):
34
  super(Minds14Config, self).__init__(
35
+ name=channel+gender+race,
36
  version=datasets.Version("1.0.0", ""),
37
  description=self.description,
38
  )
39
  self.channel = channel
40
+ self.gender = gender
41
+ self.race = race
42
  self.description = description
43
  self.homepage = homepage
44
  self.path_to_data = path_to_data
45
 
46
 
47
+ def _build_config(channel, gender, race):
48
  return Minds14Config(
49
  channel=channel,
50
+ gender=gender,
51
+ race=race,
52
  description=_DESCRIPTION,
53
  homepage=_HOMEPAGE,
54
  path_to_data=_PATH_TO_DATA,
 
73
  # data = datasets.load_dataset('my_dataset', 'second_domain')
74
  BUILDER_CONFIGS = []
75
  for channel in _CHANNEL_CONFIGS + ["all"]:
76
+ for gender in _GENDER_CONFIGS + ["all"]:
77
+ for race in _RACE_CONFIGS + ["all"]:
78
+ BUILDER_CONFIGS.append(_build_config(channel, gender, race))
79
  # BUILDER_CONFIGS = [_build_config(name) for name in _CHANNEL_CONFIGS + ["all"]]
80
 
81
+ DEFAULT_CONFIG_NAME = "allallall" # It's not mandatory to have a default configuration. Just use one if it make sense.
82
 
83
  def _info(self):
84
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
85
  task_templates = None
86
+ # mics = _CHANNEL_CONFIGS
87
  features = datasets.Features(
88
  {
89
+ "audio": datasets.features.Audio(sampling_rate=16000),
90
  "transcript": datasets.Value("string"),
91
  "mic": datasets.Value("string"),
92
  "audio_name": datasets.Value("string"),
93
+ "gender": datasets.Value("string"),
94
+ "race": datasets.Value("string"),
95
  }
96
  )
97
 
 
121
  else [self.config.channel]
122
  )
123
 
124
+ gender = (
125
+ _GENDER_CONFIGS
126
+ if self.config.gender == "all"
127
+ else [self.config.gender]
128
+ )
129
+
130
+ race = (
131
+ _RACE_CONFIGS
132
+ if self.config.race == "all"
133
+ else [self.config.race]
134
+ )
135
+
136
+ # augment speaker ids directly here
137
+ # read the speaker information
138
+ train_speaker_ids = []
139
+ test_speaker_ids = []
140
+ # path_to_speaker = os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX")
141
+ path_to_speaker = dl_manager.download(os.path.join(self.config.path_to_data, "DOC", "Speaker Information (Part 1).XLSX"))
142
+ speaker_df = pd.read_excel(path_to_speaker, dtype={'SCD/PART1': object})
143
+ for g in gender:
144
+ for r in race:
145
+ X = speaker_df[(speaker_df["ACC"]==r) & (speaker_df["SEX"]==g)]
146
+ X_train, X_test = train_test_split(X, test_size=0.3, random_state=42, shuffle=True)
147
+ train_speaker_ids.extend(X_train["SCD/PART1"])
148
+ test_speaker_ids.extend(X_test["SCD/PART1"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
 
150
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
151
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
 
154
  datasets.SplitGenerator(
155
  name=datasets.Split.TRAIN,
156
  gen_kwargs={
157
+ "path_to_data": self.config.path_to_data,
158
+ "speaker_metadata":speaker_df,
159
+ # "speaker_ids": train_speaker_ids,
160
+ "speaker_ids":["0001"],
161
+ "mics": mics,
162
+ "dl_manager": dl_manager
163
  },
164
  ),
165
  datasets.SplitGenerator(
166
  name=datasets.Split.TEST,
167
  gen_kwargs={
168
+ "path_to_data": self.config.path_to_data,
169
+ "speaker_metadata":speaker_df,
170
+ # "speaker_ids": test_speaker_ids,
171
+ "speaker_ids": ["0003"],
172
+ "mics": mics,
173
+ "dl_manager": dl_manager
174
  },
175
  ),
176
  ]
 
178
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
179
  def _generate_examples(
180
  self,
181
+ path_to_data,
182
+ speaker_metadata,
183
+ speaker_ids,
184
+ mics,
185
+ dl_manager
186
  ):
187
  id_ = 0
188
+ for mic in mics:
189
+ for speaker in speaker_ids:
190
+ # TRANSCRIPT: in the case of error, if no file found then dictionary will b empty
191
+ d = {}
192
+ counter = 0
193
+ while counter < 10:
194
+ data = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "SCRIPT", mic[-1]+speaker+str(counter)+'.TXT'))
195
+ try:
196
+ line_num = 0
197
+ with open(data, encoding='utf-8-sig') as f:
198
+ for line in f:
199
+ if line_num == 0:
200
+ key = line.split("\t")[0]
201
+ line_num += 1
202
+ elif line_num == 1:
203
+ d[key] = line.strip()
204
+ line_num -= 1
205
+ except:
206
+ print(f"{counter}")
207
+ break
208
+ counter+=1
209
+ # AUDIO: in the case of error it will skip the speaker
210
+ # archive_path = os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip')
211
+ archive_path = dl_manager.download(os.path.join(path_to_data, "DATA", mic, "WAVE", "SPEAKER"+speaker+'.zip'))
212
+ # check that archive path exists, else will not open the archive
213
+ if os.path.exists(archive_path):
214
+ audio_files = dl_manager.iter_archive(archive_path)
215
+ for path, f in audio_files:
216
+ # bug catching if any error?
217
+ result = {}
218
+ full_path = os.path.join(archive_path, path) if archive_path else path # bug catching here
219
+ result["audio"] = {"path": full_path, "bytes": f.read()}
220
+ result["audio_name"] = path
221
+ result["mic"] = mic
222
+ metadata_row = speaker_metadata.loc[speaker_metadata["SCD/PART1"]==speaker].iloc[0]
223
+ result["gender"]=metadata_row["SEX"]
224
+ result["race"]=metadata_row["ACC"]
225
+ try:
226
+ result["transcript"] = d[f.name[-13:-4]]
227
+ yield id_, result
228
+ id_ += 1
229
+ except:
230
+ print(f"unable to find transcript")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
+
 
 
 
 
 
 
 
 
 
 
imda_nsc_part3.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+ # import pandas as pd
4
+ from sklearn.model_selection import train_test_split
5
+ import textgrids
6
+ import io
7
+ import soundfile as sf
8
+ from urllib.request import urlopen
9
+ import re
10
+ import json
11
+ import tempfile
12
+
13
def cleanup_string(line):
    """Normalize one transcript line for ASR training.

    Lowercases the text, strips IMDA annotation tokens (paralinguistic and
    noise markers such as ``(ppo)`` or ``<fil/>``), unwraps bracketed and
    quoted spans while keeping their inner text, removes punctuation, and
    collapses runs of whitespace.

    Args:
        line: Raw transcript text from a TextGrid interval.

    Returns:
        The cleaned, lowercased, single-spaced transcript string.
    """
    words_to_remove = ['(ppo)','(ppc)', '(ppb)', '(ppl)', '<s/>','<c/>','<q/>', '<fil/>', '<sta/>', '<nps/>', '<spk/>', '<non/>', '<unk>', '<s>', '<z>', '<nen>']

    formatted_line = re.sub(r'\s+', ' ', line).strip().lower()

    # Remove annotation tokens. These strings contain regex metacharacters
    # ('(', ')', '/', etc.), so they must be matched as plain substrings —
    # the previous re.search(word, ...) treated '(ppo)' as a capture group
    # around 'ppo' rather than a literal token.
    for word in words_to_remove:
        if word in formatted_line:
            formatted_line = formatted_line.replace(word, '')
    formatted_line = re.sub(r'\s+', ' ', formatted_line).strip()

    # Unwrap '[...]' e.g. 'Okay [ah], why did I gamble?' -> keep inner text.
    formatted_line = re.sub(r'\[(.*?)\]', r'\1', formatted_line).strip()

    # Unwrap '(...)' e.g. 'Okay (um), why did I gamble?' -> keep inner text.
    formatted_line = re.sub(r'\((.*?)\)', r'\1', formatted_line).strip()

    # Unwrap quoted spans e.g. "not 'hot' per se" -> keep inner text.
    formatted_line = re.sub(r"'(.*?)'", r'\1', formatted_line).strip()

    # Remove remaining punctuation; hyphen/underscore become spaces so that
    # hyphenated compounds stay as separate words.
    punctuation = '!–;"\\,./?@#$%^&*~'
    formatted_line = formatted_line.replace('-', ' ').replace('_', ' ')
    formatted_line = formatted_line.translate(str.maketrans('', '', punctuation))
    formatted_line = re.sub(r'\s+', ' ', formatted_line).strip()

    return formatted_line
55
+
56
+
57
+
58
# Dataset card description shown on the Hugging Face hub.
_DESCRIPTION = """\
The National Speech Corpus (NSC) is the first large-scale Singapore English corpus
spearheaded by the Info-communications and Media Development Authority (IMDA) of Singapore.
"""

# No citation provided yet.
_CITATION = """\
"""
# Microphone/recording-channel sub-configurations available in Part 3.
_CHANNEL_CONFIGS = sorted([
    "Audio Same CloseMic", "Audio Separate IVR", "Audio Separate StandingMic"
])

_HOMEPAGE = "https://www.imda.gov.sg/how-we-can-help/national-speech-corpus"

_LICENSE = ""

# Root of the Part 3 data tree; the commented alternative is the layout used
# on a different machine/mount.
# _PATH_TO_DATA = './IMDA - National Speech Corpus/PART3'
_PATH_TO_DATA = './PART3'

# Maximum accumulated clip length (seconds) when merging TextGrid intervals.
INTERVAL_MAX_LENGTH = 25
77
+
78
class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for xtreme-s.

    One config per microphone channel (or ``"all"``); the channel name is
    also used as the config name.
    """

    def __init__(
        self, channel, description, homepage, path_to_data
    ):
        """Create a channel-specific builder config.

        Args:
            channel: One of ``_CHANNEL_CONFIGS`` or ``"all"``.
            description: Dataset description string.
            homepage: Dataset homepage URL.
            path_to_data: Root directory of the corpus part.
        """
        super(Minds14Config, self).__init__(
            name=channel,
            version=datasets.Version("1.0.0", ""),
            # Bug fix: previously passed `self.description`, which is read
            # before the instance attribute is assigned and therefore picks
            # up the inherited default instead of the argument.
            description=description,
        )
        self.channel = channel
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
93
+
94
+
95
def _build_config(channel):
    """Construct the :class:`Minds14Config` for one microphone channel."""
    config_kwargs = {
        "channel": channel,
        "description": _DESCRIPTION,
        "homepage": _HOMEPAGE,
        "path_to_data": _PATH_TO_DATA,
    }
    return Minds14Config(**config_kwargs)
102
+
103
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
104
class NewDataset(datasets.GeneratorBasedBuilder):
    """Loader for Part 3 of the IMDA National Speech Corpus.

    Splits each recording's TextGrid transcript into clips of at most
    ``INTERVAL_MAX_LENGTH`` seconds and yields (audio clip, transcript) pairs.
    """

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    # One config per microphone channel, plus "all" covering every channel.
    BUILDER_CONFIGS = []
    for channel in _CHANNEL_CONFIGS + ["all"]:
        BUILDER_CONFIGS.append(_build_config(channel))
    # BUILDER_CONFIGS = [_build_config(name) for name in _CHANNEL_CONFIGS + ["all"]]

    DEFAULT_CONFIG_NAME = "all"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        """Return the DatasetInfo: feature schema, supervision keys, metadata."""
        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
        task_templates = None
        features = datasets.Features(
            {
                # Audio() with no sampling_rate keeps each clip's native rate.
                "audio": datasets.features.Audio(),
                "transcript": datasets.Value("string"),
                "audio_name": datasets.Value("string"),
                # Human-readable "start:..., end:..." span within the source file.
                "interval": datasets.Value("string")
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            supervised_keys=("audio", "transcript"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        """Build 70/30 train/test splits of audio file paths per channel.

        Reads a precomputed directory listing (``directory_list_part3.json``)
        and splits at the speaker/session level so both sides of a recording
        stay in the same split.
        """
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        mics = (
            _CHANNEL_CONFIGS
            if self.config.channel == "all"
            else [self.config.channel]
        )

        json_path = dl_manager.download(os.path.join(self.config.path_to_data, "directory_list_part3.json"))
        print(f"json_path: {json_path}")
        with open(json_path, "r") as f:
            directory_dict = json.load(f)
        print(f"directory_dict: {directory_dict}")

        train_audio_list = []
        test_audio_list = []
        for mic in mics:
            audio_list = []
            if mic == "Audio Same CloseMic":
                # Split on speaker-1 files only, then add the matching
                # speaker-2 file (same name with '1' -> '2' before the
                # extension) to the same split so a conversation is never
                # divided across train and test.
                audio_list = [x for x in directory_dict[mic] if (x[-5] == "1") ]
                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
                for path in train:
                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
                    s = list(path)
                    s[-5] = "2"
                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
                for path in test:
                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
                    s = list(path)
                    s[-5] = "2"
                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
            elif mic == "Audio Separate IVR":
                # Entries look like 'folder\\file'; split at the folder
                # (session) level, then gather every file in each folder.
                # NOTE(review): backslash separators imply the listing was
                # generated on Windows — confirm for other platforms.
                audio_list = [x.split("\\")[0] for x in directory_dict[mic]]
                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
                for folder in train:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
                    train_audio_list.extend(audios)
                for folder in test:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
                    test_audio_list.extend(audios)
            elif mic == "Audio Separate StandingMic":
                # Group files by their first 14 characters (presumably a
                # session/speaker prefix — verify against the corpus naming
                # scheme) and split on the deduplicated prefixes.
                audio_list = [x[:14] for x in directory_dict[mic]]
                audio_list = list(set(audio_list))
                train, test = train_test_split(audio_list, test_size=0.3, random_state=42, shuffle=True)
                for folder in train:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
                    train_audio_list.extend(audios)
                for folder in test:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
                    test_audio_list.extend(audios)

        print(f"train_audio_list: { train_audio_list}")
        print(f"test_audio_list: { test_audio_list}")

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
                    "audio_list": train_audio_list,
                    "dl_manager":dl_manager,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    # "path_to_data": os.path.join(self.config.path_to_data, "Audio Same CloseMic"),
                    "audio_list": test_audio_list,
                    "dl_manager":dl_manager,
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(
        self,
        audio_list,
        dl_manager,
    ):
        """Yield (id, example) pairs of <= INTERVAL_MAX_LENGTH-second clips.

        For each audio file: locate and parse its TextGrid transcript,
        then walk the intervals, accumulating consecutive ones until the
        length budget is reached, splicing the matching audio span, and
        yielding the clip with its joined transcript.
        """
        id_ = 0
        for audio_path in audio_list:
            file = os.path.split(audio_path)[-1]
            folder = os.path.split(os.path.split(audio_path)[0])[-1]
            # get script_path: map the audio filename to its TextGrid.
            if file.split("_")[0] == "conf":
                # mic == "Audio Separate IVR"
                # NOTE(review): position 14 of the conference filename is
                # rewritten to '_' to match the script naming — confirm
                # against the corpus filename convention.
                s = list(file)
                s[14] = "_"
                file = ''.join(s)
                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
            elif folder.split()[1] == "Same":
                # mic == "Audio Same CloseMic IVR"
                script_path = os.path.join(self.config.path_to_data, "Scripts Same", file[:-4]+".TextGrid")
            elif folder.split()[1] == "Separate":
                # mic == "Audio Separate StandingMic":
                script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")

            # LOAD TRANSCRIPT
            # script_path = os.path.join(self.config.path_to_data, 'Scripts Same', '3000-1.TextGrid')
            # check that the textgrid file can be read; corpus files come in
            # mixed encodings, so fall back through utf-16 and utf-8-sig.
            # After each parse, `tg` is rebound to the last tier in the grid.
            script_path = dl_manager.download(script_path)
            try:
                # tg = textgrid.TextGrid.fromFile(script_path)
                with open(script_path, "rb") as f:
                    tg = f.read()
                tg_dict = textgrids.TextGrid()
                tg_dict.parse(tg)
                for key in tg_dict.keys():
                    tg = tg_dict[key]
            except UnicodeDecodeError:
                # Retry assuming UTF-16 encoded TextGrid.
                with open(script_path, "rb") as f:
                    tg = f.read()
                decoded = tg.decode('utf-16')
                encoded = decoded.encode('utf-8')
                tg_dict = textgrids.TextGrid()
                tg_dict.parse(encoded)
                for key in tg_dict.keys():
                    tg = tg_dict[key]
            except TypeError:
                # Retry assuming UTF-8 with BOM.
                with open(script_path, "rb") as f:
                    tg = f.read()
                decoded = tg.decode('utf-8-sig')
                encoded = decoded.encode('utf-8')
                tg_dict = textgrids.TextGrid()
                tg_dict.parse(encoded)
                for key in tg_dict.keys():
                    tg = tg_dict[key]

            except Exception as e:
                # Unreadable transcript: skip this recording entirely.
                print(f"error reading textgrid file, {script_path}, {str(e)}")
                continue
            # LOAD AUDIO
            # check that archive path exists, else will not open the archive
            audio_path = dl_manager.download(audio_path)
            if os.path.exists(audio_path):
                with open(audio_path, 'rb') as f:
                    data, sr = sf.read(f)
                # data, sr = sf.read(audio_path)
                result = {}
                i = 0
                # Running length (seconds) and start time of the clip being built.
                intervalLength = 0
                intervalStart = 0
                transcript_list = []
                # filepath = os.path.join(self.config.path_to_data, 'tmp_clip.wav')
                # filepath = dl_manager.download(filepath)
                # NOTE(review): tempfile.mktemp is deprecated/race-prone and
                # the same temp path is reused for every clip of this file —
                # consider NamedTemporaryFile.
                tempWavFile = tempfile.mktemp('.wav')
                # NOTE(review): the last interval (index len(tg)-1) is never
                # processed by this loop — confirm whether that is intentional
                # (e.g. a trailing silence tier).
                while i < (len(tg)-1):
                    transcript = cleanup_string(tg[i].text)
                    if intervalLength == 0 and len(transcript) == 0:
                        # Nothing accumulated and this interval is silence:
                        # move the clip start forward.
                        intervalStart = tg[i].xmax
                        i+=1
                        continue
                    intervalLength += tg[i].xmax-tg[i].xmin
                    if intervalLength > INTERVAL_MAX_LENGTH:
                        # Single interval already exceeds the budget: emit it
                        # alone and reset the accumulator.
                        print(f"INTERVAL LONGER THAN {intervalLength}")
                        spliced_audio = data[int(tg[i].xmin*sr):int(tg[i].xmax*sr)]
                        sf.write(tempWavFile,spliced_audio, sr)
                        result["transcript"] = transcript
                        result["interval"] = "start:"+str(tg[i].xmin)+", end:"+str(tg[i].xmax)
                        result["audio"] = {"path": tempWavFile, "bytes": spliced_audio, "sampling_rate":sr}
                        result["audio_name"] = audio_path
                        yield id_, result
                        id_+= 1
                        intervalLength = 0
                    else:
                        if (intervalLength + tg[i+1].xmax-tg[i+1].xmin) < INTERVAL_MAX_LENGTH:
                            # Next interval still fits: keep accumulating.
                            if len(transcript) != 0:
                                transcript_list.append(transcript)
                            i+=1
                            continue
                        # Budget would be exceeded by the next interval: close
                        # the clip here (ending before this interval if it is
                        # silence, after it otherwise).
                        if len(transcript) == 0:
                            spliced_audio = data[int(intervalStart*sr):int(tg[i].xmin*sr)]
                        else:
                            transcript_list.append(transcript)
                            spliced_audio = data[int(intervalStart*sr):int(tg[i].xmax*sr)]

                        sf.write(tempWavFile,spliced_audio, sr )
                        # sf.write(filepath, spliced_audio, sr)
                        result["interval"] = "start:"+str(intervalStart)+", end:"+str(tg[i].xmax)
                        result["audio"] = {"path": tempWavFile, "bytes": spliced_audio, "sampling_rate":sr}
                        result["transcript"] = ' '.join(transcript_list)
                        result["audio_name"] = audio_path
                        yield id_, result
                        id_+= 1
                        intervalLength=0
                        intervalStart=tg[i].xmax
                        transcript_list = []
                        i+=1