Porjaz committed
Commit d8aed4f · verified · 1 Parent(s): d83cf45

Create custom_interface_app.py

Files changed (1)
  1. custom_interface_app.py +326 -0
custom_interface_app.py ADDED
@@ -0,0 +1,326 @@
+ import torch
+ from speechbrain.inference.interfaces import Pretrained
+ import librosa
+ import numpy as np
+
+
+ class ASR(Pretrained):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def encode_batch_w2v2(self, device, wavs, wav_lens=None, normalize=False):
+         wavs = wavs.to(device)
+         wav_lens = wav_lens.to(device)
+
+         # Forward pass through the wav2vec 2.0 encoder; decoding is handled
+         # entirely by the beam search below, so no manual decoder pass is needed.
+         encoded_outputs = self.mods.encoder_w2v2(wavs.detach())
+
+         # Beam search over the encoded states
+         predictions = self.hparams.test_search(encoded_outputs, wav_lens)[0]
+
+         # Detokenize each hypothesis, dropping padding tokens (id 0)
+         predicted_words = []
+         for prediction in predictions:
+             prediction = [token for token in prediction if token != 0]
+             predicted_words.append(self.hparams.tokenizer.decode_ids(prediction).split(" "))
+
+         # Remove spurious repetitions from each hypothesis
+         filtered = []
+         for sent in predicted_words:
+             filtered.append(self.filter_repetitions(sent, 3))
+         return filtered
+
+
+     def encode_batch_whisper(self, device, wavs, wav_lens=None, normalize=False):
+         wavs = wavs.to(device)
+         wav_lens = wav_lens.to(device)
+
+         # Forward pass: run the Whisper encoder, seeding the decoder with the
+         # start-of-transcript token; hypotheses come from the beam search below.
+         tokens = torch.tensor([[1, 1]]) * self.mods.whisper.config.decoder_start_token_id
+         tokens = tokens.to(device)
+         enc_out, logits, _ = self.mods.whisper(wavs, tokens)
+
+         hyps, _, _, _ = self.hparams.test_search(enc_out.detach(), wav_lens)
+         predicted_words = [self.mods.whisper.tokenizer.decode(token, skip_special_tokens=True).strip() for token in hyps]
+         return predicted_words
+
+
+     def filter_repetitions(self, seq, max_repetition_length):
+         seq = list(seq)
+         output = []
+         max_n = len(seq) // 2
+         for n in range(max_n, 0, -1):
+             max_repetitions = max(max_repetition_length // n, 1)
+             # Don't need to iterate over impossible n values:
+             # len(seq) can change a lot during iteration
+             if (len(seq) <= n * 2) or (len(seq) <= max_repetition_length):
+                 continue
+             iterator = enumerate(seq)
+             # Fill first buffers:
+             buffers = [[next(iterator)[1]] for _ in range(n)]
+             for seq_index, token in iterator:
+                 current_buffer = seq_index % n
+                 if token != buffers[current_buffer][-1]:
+                     # No repeat, we can flush some tokens
+                     buf_len = sum(map(len, buffers))
+                     flush_start = (current_buffer - buf_len) % n
+                     # Keep n-1 tokens, but possibly mark some for removal
+                     for flush_index in range(buf_len - buf_len % n):
+                         if (buf_len - flush_index) > n - 1:
+                             to_flush = buffers[(flush_index + flush_start) % n].pop(0)
+                         else:
+                             to_flush = None
+                         # Here, repetitions get removed; a None is appended as a
+                         # deletion marker that the cleanup pass below consumes:
+                         if (flush_index // n < max_repetitions) and to_flush is not None:
+                             output.append(to_flush)
+                         elif (flush_index // n >= max_repetitions) and to_flush is None:
+                             output.append(to_flush)
+                 buffers[current_buffer].append(token)
+             # At the end, final flush
+             current_buffer += 1
+             buf_len = sum(map(len, buffers))
+             flush_start = (current_buffer - buf_len) % n
+             for flush_index in range(buf_len):
+                 to_flush = buffers[(flush_index + flush_start) % n].pop(0)
+                 # Here, repetitions just get removed:
+                 if flush_index // n < max_repetitions:
+                     output.append(to_flush)
+             # Cleanup pass: each None marker deletes the next real token
+             seq = []
+             to_delete = 0
+             for token in output:
+                 if token is None:
+                     to_delete += 1
+                 elif to_delete > 0:
+                     to_delete -= 1
+                 else:
+                     seq.append(token)
+             output = []
+         return seq
+
+
+     def increase_volume(self, waveform, threshold_db=-25):
+         # Measure loudness using RMS
+         loudness_vector = librosa.feature.rms(y=waveform)
+         average_loudness = np.mean(loudness_vector)
+         average_loudness_db = librosa.amplitude_to_db(average_loudness)
+
+         print(f"Average Loudness: {average_loudness_db} dB")
+
+         # Check if loudness is below threshold and apply gain if needed
+         if average_loudness_db < threshold_db:
+             # Calculate gain needed
+             gain_db = threshold_db - average_loudness_db
+             gain = librosa.db_to_amplitude(gain_db)  # Convert dB to amplitude factor
+
+             # Apply gain to the audio signal
+             waveform = waveform * gain
+             loudness_vector = librosa.feature.rms(y=waveform)
+             average_loudness = np.mean(loudness_vector)
+             average_loudness_db = librosa.amplitude_to_db(average_loudness)
+
+             print(f"Average Loudness: {average_loudness_db} dB")
+         return waveform
+
+
+     def classify_file_w2v2(self, waveform, device):
+         # `waveform` is expected to be a 1-D numpy array sampled at 16 kHz
+         # (e.g. loaded with librosa.load(path, sr=16000)).
+
+         # Increase the volume if needed:
+         # waveform = self.increase_volume(waveform)
+
+         # Get audio length in seconds
+         sr = 16000
+         audio_length = len(waveform) / sr
+
+         if audio_length >= 20:
+             print(f"Audio is too long ({audio_length:.2f} seconds), splitting into segments")
+             # Detect non-silent segments
+             non_silent_intervals = librosa.effects.split(waveform, top_db=20)  # Adjust top_db for sensitivity
+
+             segments = []
+             current_segment = []
+             current_length = 0
+             max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
+
+             for interval in non_silent_intervals:
+                 start, end = interval
+                 segment_part = waveform[start:end]
+
+                 # If adding the next part exceeds max duration, store the segment and start a new one
+                 if current_segment and current_length + len(segment_part) > max_duration:
+                     segments.append(np.concatenate(current_segment))
+                     current_segment = []
+                     current_length = 0
+
+                 current_segment.append(segment_part)
+                 current_length += len(segment_part)
+
+             # Append the last segment if it's not empty
+             if current_segment:
+                 segments.append(np.concatenate(current_segment))
+
+             # Process each segment
+             outputs = []
+             for i, segment in enumerate(segments):
+                 print(f"Processing segment {i + 1}/{len(segments)}, length: {len(segment) / sr:.2f} seconds")
+
+                 segment_tensor = torch.tensor(segment).to(device)
+
+                 # Fake a batch for the segment
+                 batch = segment_tensor.unsqueeze(0).to(device)
+                 rel_length = torch.tensor([1.0]).to(device)
+
+                 # Pass the segment through the ASR model
+                 result = " ".join(self.encode_batch_w2v2(device, batch, rel_length)[0])
+                 outputs.append(result)
+             return outputs
+         else:
+             waveform = torch.tensor(waveform).to(device)
+             # Fake a batch:
+             batch = waveform.unsqueeze(0)
+             rel_length = torch.tensor([1.0]).to(device)
+             outputs = " ".join(self.encode_batch_w2v2(device, batch, rel_length)[0])
+             return [outputs]
+
+
+     def classify_file_whisper_mkd(self, waveform, device):
+         # `waveform` is expected to be a 1-D numpy array sampled at 16 kHz
+         # (e.g. loaded with librosa.load(path, sr=16000)).
+
+         # Increase the volume if needed:
+         # waveform = self.increase_volume(waveform)
+
+         # Get audio length in seconds
+         sr = 16000
+         audio_length = len(waveform) / sr
+
+         if audio_length >= 20:
+             print(f"Audio is too long ({audio_length:.2f} seconds), splitting into segments")
+             # Detect non-silent segments
+             non_silent_intervals = librosa.effects.split(waveform, top_db=20)  # Adjust top_db for sensitivity
+
+             segments = []
+             current_segment = []
+             current_length = 0
+             max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
+
+             for interval in non_silent_intervals:
+                 start, end = interval
+                 segment_part = waveform[start:end]
+
+                 # If adding the next part exceeds max duration, store the segment and start a new one
+                 if current_segment and current_length + len(segment_part) > max_duration:
+                     segments.append(np.concatenate(current_segment))
+                     current_segment = []
+                     current_length = 0
+
+                 current_segment.append(segment_part)
+                 current_length += len(segment_part)
+
+             # Append the last segment if it's not empty
+             if current_segment:
+                 segments.append(np.concatenate(current_segment))
+
+             # Process each segment
+             outputs = []
+             for i, segment in enumerate(segments):
+                 print(f"Processing segment {i + 1}/{len(segments)}, length: {len(segment) / sr:.2f} seconds")
+
+                 segment_tensor = torch.tensor(segment).to(device)
+
+                 # Fake a batch for the segment
+                 batch = segment_tensor.unsqueeze(0).to(device)
+                 rel_length = torch.tensor([1.0]).to(device)
+
+                 # Pass the segment through the ASR model
+                 segment_output = self.encode_batch_whisper(device, batch, rel_length)
+                 outputs.append(segment_output)
+             return outputs
+         else:
+             waveform = torch.tensor(waveform).to(device)
+             # Fake a batch:
+             batch = waveform.unsqueeze(0)
+             rel_length = torch.tensor([1.0]).to(device)
+             outputs = self.encode_batch_whisper(device, batch, rel_length)
+             return outputs
+
+
+     def classify_file_whisper(self, path, pipe, device):
+         waveform, sr = librosa.load(path, sr=16000)
+         transcription = pipe(waveform, generate_kwargs={"language": "macedonian"})["text"]
+         return transcription
+
+
+     def classify_file_mms(self, path, processor, model, device):
+         # Load the audio file
+         waveform, sr = librosa.load(path, sr=16000)
+
+         # Get audio length in seconds
+         audio_length = len(waveform) / sr
+
+         if audio_length >= 20:
+             print(f"MMS Audio is too long ({audio_length:.2f} seconds), splitting into segments")
+             # Detect non-silent segments
+             non_silent_intervals = librosa.effects.split(waveform, top_db=20)  # Adjust top_db for sensitivity
+
+             segments = []
+             current_segment = []
+             current_length = 0
+             max_duration = 20 * sr  # Maximum segment duration in samples (20 seconds)
+
+             for interval in non_silent_intervals:
+                 start, end = interval
+                 segment_part = waveform[start:end]
+
+                 # If adding the next part exceeds max duration, store the segment and start a new one
+                 if current_segment and current_length + len(segment_part) > max_duration:
+                     segments.append(np.concatenate(current_segment))
+                     current_segment = []
+                     current_length = 0
+
+                 current_segment.append(segment_part)
+                 current_length += len(segment_part)
+
+             # Append the last segment if it's not empty
+             if current_segment:
+                 segments.append(np.concatenate(current_segment))
+
+             # Process each segment, yielding one transcription at a time
+             for i, segment in enumerate(segments):
+                 print(f"MMS Processing segment {i + 1}/{len(segments)}, length: {len(segment) / sr:.2f} seconds")
+
+                 segment_tensor = torch.tensor(segment).to(device)
+
+                 # Pass the segment through the ASR model (CTC greedy decoding)
+                 inputs = processor(segment_tensor, sampling_rate=16_000, return_tensors="pt").to(device)
+                 logits = model(**inputs).logits
+                 ids = torch.argmax(logits, dim=-1)[0]
+                 segment_output = processor.decode(ids)
+                 yield segment_output
+         else:
+             waveform = torch.tensor(waveform).to(device)
+             inputs = processor(waveform, sampling_rate=16_000, return_tensors="pt").to(device)
+             logits = model(**inputs).logits
+             ids = torch.argmax(logits, dim=-1)[0]
+             transcription = processor.decode(ids)
+             yield transcription
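
For context, a minimal usage sketch (not part of the commit). It assumes the repository ships a hyperparams file defining the modules this interface expects (encoder_w2v2, embedding, decoder, test_search, tokenizer); the source repo id, savedir, and audio path below are placeholders. Pretrained.from_hparams is SpeechBrain's standard loader for custom interfaces like this ASR class.

import librosa
import torch

from custom_interface_app import ASR

device = "cuda" if torch.cuda.is_available() else "cpu"

# "Porjaz/some-asr-repo" and the savedir are hypothetical placeholders.
asr_model = ASR.from_hparams(
    source="Porjaz/some-asr-repo",
    savedir="pretrained_models/asr",
    run_opts={"device": device},
)

# classify_file_w2v2 takes a 16 kHz mono waveform, splits long audio on
# silence, and returns one transcription string per segment.
waveform, sr = librosa.load("sample.wav", sr=16000)
for text in asr_model.classify_file_w2v2(waveform, device):
    print(text)

filter_repetitions can also be exercised on its own: given a token list such as ["на", "на", "на", "на", "крајот"], it collapses runs that repeat beyond the allowed length, which is how stuck-decoder loops are trimmed from the hypotheses.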