Update app.py
Browse files
app.py
CHANGED
|
@@ -10,30 +10,45 @@ from scipy.io import wavfile
|
|
| 10 |
import scipy.signal as sps
|
| 11 |
from denoiser.demucs import Demucs
|
| 12 |
from pydub import AudioSegment
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
def transcribe(file_upload, microphone):
|
| 17 |
file = microphone if microphone is not None else file_upload
|
| 18 |
-
|
| 19 |
state_dict = torch.load(modelpath, map_location='cpu')
|
| 20 |
-
|
| 21 |
-
demucs = model
|
| 22 |
x, sr = torchaudio.load(file)
|
| 23 |
-
out =
|
| 24 |
out = out / max(out.abs().max().item(), 1)
|
| 25 |
torchaudio.save('enhanced.wav', out, sr)
|
| 26 |
enhanced = AudioSegment.from_wav('enhanced.wav') #只有去完噪的需要降bitrate再做語音識別
|
| 27 |
enhanced.export('enhanced.wav', format="wav", bitrate="256k")
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
demo = gr.Interface(
|
| 31 |
fn=transcribe,
|
| 32 |
inputs=[
|
| 33 |
-
gr.Audio(
|
| 34 |
-
gr.Audio(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
],
|
| 36 |
-
outputs=gr.Audio(type="filepath", label="Output"),
|
| 37 |
title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>語音質檢噪音去除 (語音增強):Meta Denoiser</a>",
|
| 38 |
description=(
|
| 39 |
"為了提升語音識別的效果,可以在識別前先進行噪音去除"
|
|
@@ -47,4 +62,4 @@ demo = gr.Interface(
|
|
| 47 |
],
|
| 48 |
)
|
| 49 |
|
| 50 |
-
demo.launch(
|
|
|
|
| 10 |
import scipy.signal as sps
from denoiser.demucs import Demucs
from pydub import AudioSegment
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hugging Face Hub access token (translated from the original Chinese comment).
# NOTE(review): HF_HOME is the Hub *cache directory* variable, not a token,
# and `auth_token` is never used below — binding kept only so any external
# reader of this module-level name keeps working; confirm and remove.
auth_token = os.getenv("HF_HOME")

# Load the private model from the Hub (translated comment).
# Read the real token exactly once; a missing HF_TOKEN fails fast with
# KeyError here instead of half-way through the two downloads below.
hf_token = os.environ["HF_TOKEN"]
model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
model = AutoModelForSequenceClassification.from_pretrained(model_id, use_auth_token=hf_token)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_token)
|
| 22 |
|
| 23 |
def transcribe(file_upload, microphone):
    """Denoise one audio file and run the Hub model on the result.

    Parameters
    ----------
    file_upload : str | None
        Path of an uploaded audio file (Gradio ``type="filepath"`` input).
    microphone : str | None
        Path of a microphone recording; takes precedence when both are given.

    Returns
    -------
    tuple
        ``('enhanced.wav', predictions)`` — the enhanced-audio file path and
        a softmax tensor of model scores.
    """
    # Prefer the live microphone recording over the uploaded file.
    file = microphone if microphone is not None else file_upload

    # Rebuild the Demucs denoiser and load its weights on every call.
    # NOTE(review): `modelpath` is not defined anywhere visible in this file —
    # this raises NameError unless it is set elsewhere; confirm.
    # NOTE(review): constructing + loading per request is costly; consider
    # hoisting the denoiser to module level.
    demucs_model = Demucs(hidden=64)
    state_dict = torch.load(modelpath, map_location='cpu')
    demucs_model.load_state_dict(state_dict)

    # Denoise: add a batch dimension, take the single output, then
    # peak-normalize so the waveform stays within [-1, 1].
    x, sr = torchaudio.load(file)
    out = demucs_model(x[None])[0]
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)

    # Only the denoised audio needs its bitrate lowered before speech
    # recognition (translated from the original Chinese comment).
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")

    # Assumes the Hub model is a text classifier (translated comment).
    # NOTE(review): `enhanced` is a pydub AudioSegment, not text — passing it
    # to a text tokenizer will almost certainly raise at runtime; verify the
    # intended input (a transcription string?) with the author.
    inputs = tokenizer(enhanced, return_tensors="pt")
    outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    return "enhanced.wav", predictions
| 41 |
|
| 42 |
demo = gr.Interface(
|
| 43 |
fn=transcribe,
|
| 44 |
inputs=[
|
| 45 |
+
gr.Audio(type="filepath", label="語音質檢麥克風實時錄音"),
|
| 46 |
+
gr.Audio(type="filepath", label="語音質檢原始音檔"),
|
| 47 |
+
],
|
| 48 |
+
outputs=[
|
| 49 |
+
gr.Audio(type="filepath", label="Output"),
|
| 50 |
+
gr.Textbox(label="Model Predictions")
|
| 51 |
],
|
|
|
|
| 52 |
title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>語音質檢噪音去除 (語音增強):Meta Denoiser</a>",
|
| 53 |
description=(
|
| 54 |
"為了提升語音識別的效果,可以在識別前先進行噪音去除"
|
|
|
|
| 62 |
],
|
| 63 |
)
|
| 64 |
|
| 65 |
+
# Start the Gradio server; debug=True surfaces tracebacks in the console/UI.
demo.launch(debug=True)
|