Datasets:
Update generate_robust_hp.py
Browse files
- generate_robust_hp.py: +2 -2
generate_robust_hp.py
CHANGED
|
@@ -94,7 +94,7 @@ for line in f_noisy_wav.readlines():
|
|
| 94 |
assert clean_utt_id == utt_id, (line, clean_line)
|
| 95 |
gt = ' '.join(f_text.readline().strip().split()[1:])
|
| 96 |
audio = whisper.load_audio(audio_path)
|
| 97 |
-
audio = whisper.pad_or_trim(audio)
|
| 98 |
mel = whisper.log_mel_spectrogram(audio).to(model.device)
|
| 99 |
options = whisper.DecodingOptions(language='en', beam_size=50)
|
| 100 |
texts, confidences = whisper.decode_score(model, mel, options)
|
|
@@ -104,7 +104,7 @@ for line in f_noisy_wav.readlines():
|
|
| 104 |
|
| 105 |
## clean audio feats
|
| 106 |
clean_audio = whisper.load_audio(clean_audio_path)
|
| 107 |
-
clean_audio = whisper.pad_or_trim(clean_audio)
|
| 108 |
clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
|
| 109 |
clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]
|
| 110 |
|
|
|
|
| 94 |
assert clean_utt_id == utt_id, (line, clean_line)
|
| 95 |
gt = ' '.join(f_text.readline().strip().split()[1:])
|
| 96 |
audio = whisper.load_audio(audio_path)
|
| 97 |
+
# audio = whisper.pad_or_trim(audio) # padding to 30s
|
| 98 |
mel = whisper.log_mel_spectrogram(audio).to(model.device)
|
| 99 |
options = whisper.DecodingOptions(language='en', beam_size=50)
|
| 100 |
texts, confidences = whisper.decode_score(model, mel, options)
|
|
|
|
| 104 |
|
| 105 |
## clean audio feats
|
| 106 |
clean_audio = whisper.load_audio(clean_audio_path)
|
| 107 |
+
# clean_audio = whisper.pad_or_trim(clean_audio) # padding to 30s
|
| 108 |
clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
|
| 109 |
clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]
|
| 110 |
|