Champion committed on
Commit
3346cf2
·
verified ·
1 Parent(s): df01c87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -11
app.py CHANGED
@@ -91,12 +91,6 @@ def inference(audio, model_tag):
91
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2308.04455' target='_blank'>PhD thesis: Anonymizing Speech: Evaluating and Designing Speaker Anonymization Techniques</a> | <a href='https://github.com/deep-privacy/SA-toolkit' target='_blank'>Github Repo</a></p>"
92
 
93
 
94
- def toggle_audio_src(choice):
95
- if choice == "mic":
96
- return gr.update(sources=["microphone"], value=None, label="Microphone (best with a headset)")
97
- else:
98
- return gr.update(sources=["upload"], value=None, label="File")
99
-
100
  with gr.Blocks() as interface:
101
  gr.Markdown(
102
  """
@@ -106,9 +100,7 @@ with gr.Blocks() as interface:
106
  )
107
  with gr.Row():
108
  with gr.Column():
109
- radio = gr.Radio(["file", "mic"], value="file",
110
- label="Input speech (File or Mic)")
111
- audio_input = gr.Audio(sources=["upload"], type="numpy", label="File",
112
  interactive=True, elem_id="melody-input")
113
  model_tag = gr.Dropdown(['hifigan_bn_tdnnf_wav2vec2_vq_48_v1',
114
  'hifigan_bn_tdnnf_wav2vec2_100h_aug_v1',
@@ -125,7 +117,6 @@ with gr.Blocks() as interface:
125
  audio_output = gr.Audio(label="Output")
126
  submit.click(inference, inputs=[audio_input, model_tag],
127
  outputs=[audio_output], batch=False)
128
- radio.change(toggle_audio_src, radio, [audio_input], queue=False, show_progress=False)
129
  gr.Examples(fn=inference,
130
  examples=[['3853-163249-0000.flac']],
131
  inputs=[audio_input, "hifigan_bn_tdnnf_wav2vec2_vq_48_v1"],
@@ -133,4 +124,4 @@ with gr.Blocks() as interface:
133
 
134
 
135
  gr.HTML(article)
136
- interface.queue().launch()
 
91
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2308.04455' target='_blank'>PhD thesis: Anonymizing Speech: Evaluating and Designing Speaker Anonymization Techniques</a> | <a href='https://github.com/deep-privacy/SA-toolkit' target='_blank'>Github Repo</a></p>"
92
 
93
 
 
 
 
 
 
 
94
  with gr.Blocks() as interface:
95
  gr.Markdown(
96
  """
 
100
  )
101
  with gr.Row():
102
  with gr.Column():
103
+ audio_input = gr.Audio(sources=["upload", "microphone"], type="numpy", label="File",
 
 
104
  interactive=True, elem_id="melody-input")
105
  model_tag = gr.Dropdown(['hifigan_bn_tdnnf_wav2vec2_vq_48_v1',
106
  'hifigan_bn_tdnnf_wav2vec2_100h_aug_v1',
 
117
  audio_output = gr.Audio(label="Output")
118
  submit.click(inference, inputs=[audio_input, model_tag],
119
  outputs=[audio_output], batch=False)
 
120
  gr.Examples(fn=inference,
121
  examples=[['3853-163249-0000.flac']],
122
  inputs=[audio_input, "hifigan_bn_tdnnf_wav2vec2_vq_48_v1"],
 
124
 
125
 
126
  gr.HTML(article)
127
+ interface.launch()