# voice / app.py — Gradio Space: convert German text to an MP3 voice file via edge-tts.
import gradio as gr
import edge_tts
import asyncio
import tempfile
import os
async def get_voices(locale="de-DE"):
    """Return a mapping of display labels to edge-tts voice short names.

    Args:
        locale: BCP-47 locale filter; defaults to "de-DE" to preserve the
            original hard-coded German-only behavior.

    Returns:
        dict mapping "ShortName - Locale (Gender)" labels to ShortName values,
        suitable for a dropdown whose label is later split on " - ".
    """
    voices = await edge_tts.list_voices()
    # Keep only voices matching the requested locale.
    voices = [voice for voice in voices if voice.get("Locale") == locale]
    return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
async def text_to_speech(text, voice):
    """Synthesize *text* with the chosen edge-tts voice into a temporary MP3.

    Args:
        text: Text to synthesize; must be non-empty after stripping.
        voice: Dropdown label of the form "ShortName - Locale (Gender)",
            or empty when nothing was selected.

    Returns:
        (path, warning): path to the generated .mp3 and None on success, or
        (None, message) when input validation fails.
    """
    # Guard clauses: return a user-facing message instead of calling the API.
    if not text.strip():
        return None, "Text eingeben."
    if not voice:
        return None, "Stimme wählen."
    # The dropdown label embeds the voice ShortName before the first " - ".
    voice_short_name = voice.split(" - ")[0]
    communicate = edge_tts.Communicate(text, voice_short_name)
    # delete=False: the file must outlive this function so Gradio can serve it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
    # Save AFTER the handle is closed: edge-tts reopens the path itself, which
    # fails on Windows while NamedTemporaryFile still holds the file open.
    await communicate.save(tmp_path)
    return tmp_path, None
async def tts_interface(text, voice):
    """Gradio click handler: run TTS and surface validation messages as warnings."""
    audio_path, message = await text_to_speech(text, voice)
    if not message:
        return audio_path, None
    return audio_path, gr.Warning(message)
async def create_demo():
    """Build and return the Gradio Blocks UI for the German TTS converter."""
    voice_map = await get_voices()
    with gr.Blocks() as ui:
        gr.Markdown("# Text in mp3-Sprachdatei umwandeln")
        # Inputs stacked vertically.
        with gr.Column():
            text_input = gr.Textbox(label="Text", lines=5)
            voice_select = gr.Dropdown(choices=[""] + list(voice_map.keys()), label="Stimme", value="")
        # Outputs stacked vertically.
        with gr.Column():
            audio_output = gr.Audio(label="Generated Audio", type="filepath")
            warning_text = gr.Markdown(label="Warning", visible=False)
        submit_btn = gr.Button("Umwandeln")
        # Wire the button to the TTS handler.
        submit_btn.click(
            fn=tts_interface,
            inputs=[text_input, voice_select],
            outputs=[audio_output, warning_text],
        )
    return ui
async def main():
    """Build the demo, enable request queueing, and launch the web server."""
    app = await create_demo()
    app.queue(default_concurrency_limit=5)
    app.launch(show_api=False)
if __name__ == "__main__":
    # Entry point: run the async builder/launcher under a fresh event loop.
    asyncio.run(main())