Update app.py
Browse files
app.py
CHANGED
|
@@ -79,10 +79,68 @@ def denoise(filename, ckpt_path = CHECKPOINT, out = "out.wav"):
|
|
| 79 |
|
| 80 |
return out
|
| 81 |
|
| 82 |
-
audio = gr.inputs.Audio(label = "Audio to denoise", type = 'filepath')
|
| 83 |
-
inputs = [audio]
|
| 84 |
-
outputs = gr.outputs.Audio(label = "Denoised audio", type = 'filepath')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
-
title = "Speech Denoising in the Waveform Domain with Self-Attention from Nvidia"
|
| 87 |
-
|
| 88 |
-
gr.Interface(denoise, inputs, outputs, title=title, enable_queue=True).launch()
|
|
|
|
| 79 |
|
| 80 |
return out
|
| 81 |
|
| 82 |
+
#audio = gr.inputs.Audio(label = "Audio to denoise", type = 'filepath')
|
| 83 |
+
#inputs = [audio]
|
| 84 |
+
#outputs = gr.outputs.Audio(label = "Denoised audio", type = 'filepath')
|
| 85 |
+
|
| 86 |
+
#title = "Speech Denoising in the Waveform Domain with Self-Attention from Nvidia"
|
| 87 |
+
|
| 88 |
+
#gr.Interface(denoise, inputs, outputs, title=title, enable_queue=True).launch()
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# NOTE(review): currently unused — the TabbedInterface code below is commented
# out. Kept so the commented block can be re-enabled without changes.
demo = gr.Blocks()

# Gradio interface for the speech-denoising demo.
#
# denoise(filename, ckpt_path=CHECKPOINT, out="out.wav") takes the audio file
# path as its FIRST positional argument. The Radio("transcribe"/"translate")
# and Checkbox inputs left over from the Whisper template would have been
# passed as ckpt_path and out respectively, breaking the call at runtime —
# so only the microphone audio input is wired up.
mic_transcribe = gr.Interface(
    fn=denoise,
    inputs=[
        gr.inputs.Audio(
            source="microphone",
            label="Audio to denoise",
            type="filepath",
            optional=True,
        ),
    ],
    # BUGFIX: a comma was missing after this argument, which made the whole
    # call a SyntaxError (`outputs=... layout=...`).
    outputs=gr.outputs.Audio(label="Denoised audio", type="filepath"),
    layout="horizontal",
    # theme="huggingface",
    title="My Demo: Speech enhancement",
    allow_flagging="never",
)

mic_transcribe.launch()
|
| 117 |
+
|
| 118 |
+
# file_transcribe = gr.Interface(
|
| 119 |
+
# fn=transcribe,
|
| 120 |
+
# inputs=[
|
| 121 |
+
# gr.inputs.Audio(source="upload", optional=True, label="Audio file", type="filepath"),
|
| 122 |
+
# gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
|
| 123 |
+
# gr.inputs.Checkbox(default=False, label="Return timestamps"),
|
| 124 |
+
# ],
|
| 125 |
+
# outputs="text",
|
| 126 |
+
# layout="horizontal",
|
| 127 |
+
# theme="huggingface",
|
| 128 |
+
# title="Whisper Demo: Transcribe Audio",
|
| 129 |
+
# description=(
|
| 130 |
+
# "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
|
| 131 |
+
# f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
|
| 132 |
+
# " of arbitrary length."
|
| 133 |
+
# ),
|
| 134 |
+
# # examples=[
|
| 135 |
+
# # ["./example.flac", "transcribe", False],
|
| 136 |
+
# # ["./example.flac", "transcribe", True],
|
| 137 |
+
# # ],
|
| 138 |
+
# cache_examples=True,
|
| 139 |
+
# allow_flagging="never",
|
| 140 |
+
# )
|
| 141 |
+
|
| 142 |
+
# with demo:
|
| 143 |
+
# gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])
|
| 144 |
+
|
| 145 |
+
# demo.launch(enable_queue=True)
|
| 146 |
|
|
|
|
|
|
|
|
|