jspaulsen commited on
Commit
b9f67de
·
verified ·
1 Parent(s): 947bc5b

Create process.py

Browse files
Files changed (1) hide show
  1. process.py +251 -0
process.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import BytesIO
2
+ import multiprocessing as mp
3
+ from dataclasses import dataclass
4
+ import os
5
+ from pathlib import Path
6
+ import queue
7
+
8
+ import pydub
9
+ # import noisereduce as nr
10
+ import soundfile as sf
11
+ from tqdm import tqdm
12
+
13
+ from metadata import MetadataItem, LockedMetadata
14
+ from vad import remove_silence, get_vad_model_and_utils
15
+
16
+
17
@dataclass
class ProcessedFile:
    """Result of successfully processing one source audio file."""

    output: Path        # path of the processed .wav written under the dataset directory
    transcription: str  # transcript text read (and stripped) from the matching .txt file
    speaker_id: str     # speaker identifier; taken from the source file's parent directory name
    mic_id: str         # microphone identifier; last '_'-separated part of the file stem
23
+
24
+
25
@dataclass
class FileToProcess:
    """A single work item queued for the worker processes."""

    input: Path      # source audio file (.flac) to process
    input_txt: Path  # transcript file expected to accompany the audio
29
+
30
+
31
+ # def noise_reduce(
32
+ # input: Path,
33
+ # ) -> Path:
34
+ # waveform, sample_rate = sf.read(input)
35
+ # reduced_noise = nr.reduce_noise(y=waveform, sr=sample_rate, stationary=True, prop_decrease=0.8)
36
+
37
+ # sf.write(input, reduced_noise, sample_rate)
38
+ # return input
39
+
40
+
41
def pad_silence(
    input: Path | BytesIO,
    pad_length: int,
    format: str = 'wav',
) -> Path | BytesIO:
    """Add `pad_length` ms of silence to both ends of an audio clip, in place.

    Args:
        input: Source audio — a filesystem path or an in-memory buffer.
            The padded audio is written back to this same path/buffer.
        pad_length: Duration of silence to prepend and append, in
            milliseconds (pydub durations are in ms).
        format: Container format passed to pydub for both decode and encode.

    Returns:
        The same `input` object, now holding the padded audio.
    """
    audio = pydub.AudioSegment.from_file(input, format=format)

    silence = pydub.AudioSegment.silent(duration=pad_length)
    padded: pydub.AudioSegment = silence + audio + silence

    # When writing back into an in-memory buffer, rewind and drop the old
    # contents first: pydub's export seeks to the start of the file object
    # but does not truncate it, so a shorter encode would leave stale
    # trailing bytes in the buffer.
    if isinstance(input, BytesIO):
        input.seek(0)
        input.truncate(0)

    padded.export(input, format=format)
    return input
53
+
54
+
55
def process_worker(
    work: mp.Queue,
    output: mp.Queue,
) -> None:
    """Worker process: drain `work` of FileToProcess items, push results to `output`.

    Exactly one value is put on `output` per item taken from `work` (the
    value is None when the audio or transcript file is missing), so the
    consumer can expect one result per queued item.
    """
    # Load the VAD model once per worker process; CPU-only since each
    # worker is an independent spawned process.
    vad_models_and_utils = get_vad_model_and_utils(use_cuda=False, use_onnx=False)

    # Loop on a timed get() instead of polling qsize(): qsize() on a
    # multiprocessing.Queue is approximate, racy between workers, and
    # raises NotImplementedError on macOS. A timed get() that raises
    # queue.Empty is the reliable way to detect queue exhaustion.
    while True:
        try:
            item = work.get(timeout=1)
        except queue.Empty:
            break

        result = process_file(
            vad_models_and_utils=vad_models_and_utils,
            inp=item.input,
            inp_txt=item.input_txt,
            output_directory=Path('dataset'),
            pad_length=25,
        )

        output.put(result)

    print(f"Worker {mp.current_process().name} finished processing.")
78
+
79
+
80
def process_file(
    vad_models_and_utils: tuple,
    inp: Path,
    inp_txt: Path,
    output_directory: Path,
    pad_length: int = 25,
) -> ProcessedFile | None:
    """Convert one source audio file into a silence-trimmed, padded wav.

    Pipeline: load -> re-encode to wav in memory -> VAD-trim leading and
    trailing silence -> pad both ends with `pad_length` ms of silence ->
    write to `output_directory/<stem>.wav`.

    Args:
        vad_models_and_utils: Model/utils tuple from get_vad_model_and_utils.
        inp: Source audio file.
        inp_txt: Transcript file expected to accompany `inp`.
        output_directory: Destination directory for the processed wav.
        pad_length: Silence padding, in milliseconds, added to each end.

    Returns:
        A ProcessedFile describing the written output, or None when the
        audio or transcript file does not exist.
    """
    output_fpath = output_directory / f"{inp.stem}.wav"

    # Missing inputs are skipped, not errors — the caller treats None as
    # "nothing produced" for this item.
    if not inp.exists():
        return None

    if not inp_txt.exists():
        return None

    transcription = (
        inp_txt
        .read_text()
        .strip()
    )

    # Assumes layout <speaker_id>/<utterance>_<mic_id>.<ext> — TODO confirm
    # against the dataset structure.
    speaker_id = inp.parent.name
    mic_id = inp.stem.split('_')[-1]  # Assuming the mic_id is the last part of the stem

    audio_mem = BytesIO()

    # Convert file to wav (in memory; nothing touches disk until the end)
    audio: pydub.AudioSegment = pydub.AudioSegment.from_file(inp)
    audio.export(audio_mem, format='wav')
    audio_mem.seek(0)  # rewind so the VAD step reads from the start

    silent_audio_mem = BytesIO()

    # Noise Reduction (currently disabled)
    # output_fpath = noise_reduce(output_fpath)

    # Trim silence and remove leading/trailing silence
    _, _ = remove_silence(
        vad_models_and_utils,
        audio_path=audio_mem,
        out_path=silent_audio_mem,
        trim_just_beginning_and_end=True,
        format='wav',
    )

    silent_audio_mem.seek(0)  # rewind before pad_silence re-reads the buffer

    # Pad silence; pad_silence writes back into the same buffer it was given
    output_audio = pad_silence(silent_audio_mem, pad_length)
    assert isinstance(output_audio, BytesIO), "Output audio should be a BytesIO object"

    # Actually save the processed audio to the output path
    with open(output_fpath, 'wb') as f:
        f.write(output_audio.getbuffer())

    return ProcessedFile(
        output=output_fpath,
        transcription=transcription,
        speaker_id=speaker_id,
        mic_id=mic_id,
    )
141
+
142
+
143
def main() -> None:
    """Build the processed dataset.

    Finds every .flac under wav48_silence_trimmed/, VAD-trims and pads each
    one via a pool of worker processes, writes the wavs into dataset/, and
    records each result in dataset/metadata.csv. Re-runs resume: files whose
    stem is already present in the metadata are skipped.
    """
    txt = Path('txt')
    wav = Path('wav48_silence_trimmed')
    output_directory = Path('dataset')
    metadata_fpath = output_directory / 'metadata.csv'
    num_workers = os.cpu_count() or 1

    # "spawn" gives each worker a clean interpreter, avoiding fork-related
    # issues with the already-loaded VAD model.
    mp.set_start_method("spawn", force=True)

    print(f"Using {num_workers} workers for processing")

    if not txt.exists() or not wav.exists():
        raise ValueError("Input directories do not exist")

    # exist_ok makes a prior-existence check unnecessary.
    output_directory.mkdir(parents=True, exist_ok=True)

    # file_name,text,mic_id — resume from an existing metadata file when present.
    if metadata_fpath.exists():
        metadata = LockedMetadata.load(metadata_fpath, key_field='id')
    else:
        metadata = LockedMetadata(key_field='id')

    files_to_process: list[FileToProcess] = []

    for file in wav.glob('**/*.flac'):
        stem = file.stem

        # The stem maps to the metadata id; skip work done on a prior run.
        if stem in metadata:
            continue

        # Remove the _mic1 or _mic2 suffix from the stem: the transcript
        # file is shared between microphones.
        text = stem
        if stem.endswith(('_mic1', '_mic2')):
            text = stem[:-5]

        # Transcripts mirror the audio layout: txt/<speaker_dir>/<utt>.txt
        input_txt = txt / file.parent.name / f"{text}.txt"

        files_to_process.append(
            FileToProcess(
                input=file,
                input_txt=input_txt,
            )
        )

    work_queue: mp.Queue = mp.Queue()
    output_queue: mp.Queue = mp.Queue()

    # Fill the work queue before starting any workers.
    for item in files_to_process:
        work_queue.put(item)

    # Before processing the files, ensure that the VAD model is downloaded
    # once here, so the spawned workers don't race to download it.
    get_vad_model_and_utils(use_cuda=False, use_onnx=False)

    processes = [
        mp.Process(
            target=process_worker,
            args=(work_queue, output_queue),
        )
        for _ in range(num_workers)
    ]

    results: list[ProcessedFile] = []

    try:
        for w in processes:
            w.start()

        # Workers put exactly one value per queued item (None for skipped
        # files), so we expect len(files_to_process) results in total.
        for _ in tqdm(range(len(files_to_process)), desc="Processing files", unit="file"):
            result = output_queue.get()

            if result is None:
                continue

            results.append(result)

        # Wait for workers to finish
        for w in processes:
            w.join()
    finally:
        # Persist whatever finished, even if the run was interrupted.
        for result in results:
            metadata.add(
                MetadataItem(
                    id=result.output.stem,
                    text=result.transcription,
                    speaker_id=result.speaker_id,
                    file_name=result.output.name,
                    mic_id=result.mic_id,
                )
            )

        metadata.save(metadata_fpath)
248
+
249
+
250
# Script entry point: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()