# Hugging Face Space: CoquiTTS playground.
# (Scraped page status at capture time: "Spaces — Runtime error".)
import tempfile
from typing import Optional

import gradio as gr
import numpy as np
from detoxify import Detoxify

from TTS.config import load_config
from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer
# Module-level caches and limits for the demo.
MODELS = {}
SPEAKERS = {}
MAX_TXT_LEN = 100  # hard cap on input length to keep synthesis fast

manager = ModelManager()
MODEL_NAMES = manager.list_tts_models()

# Filter out multi-speaker models and slow wavegrad vocoders.
filters = ["vctk", "your_tts", "ek1"]
MODEL_NAMES = [model_name for model_name in MODEL_NAMES if not any(f in model_name for f in filters)]

# Reorder models: English models first, with a preferred one promoted to the
# front of the list.
EN = [el for el in MODEL_NAMES if "/en/" in el]
OTHER = [el for el in MODEL_NAMES if "/en/" not in el]
if len(EN) > 5:
    # Promote the sixth English model to the top. Guarded so a shorter model
    # list (upstream catalogue changes) no longer raises IndexError at import.
    EN[0], EN[5] = EN[5], EN[0]
MODEL_NAMES = EN + OTHER
print(MODEL_NAMES)
def tts(text: str, model_name: str) -> str:
    """Synthesize ``text`` with the selected Coqui TTS model.

    Args:
        text: Input sentence; silently truncated to ``MAX_TXT_LEN`` characters.
        model_name: A ``tts_models/<language>/<dataset>/<model_name>`` id
            understood by ``ModelManager``.

    Returns:
        Path to a temporary ``.wav`` file containing the synthesized audio.

    Raises:
        gr.Error: If the input text is classified as toxic.
    """
    if len(text) > MAX_TXT_LEN:
        text = text[:MAX_TXT_LEN]
        print(f"Input text was cutoff since it went over the {MAX_TXT_LEN} character limit.")
    print(text, model_name)

    # Toxicity filter. BUG FIX: previously the scores were computed but never
    # compared against the thresholds, so the filter was inert dead code.
    thresholds = {
        'toxicity': 0.4,
        'severe_toxicity': 0.2,
        'obscene': 0.3,
        'threat': 0.3,
        'insult': 0.3,
        'identity_attack': 0.3,
    }
    # NOTE(review): Detoxify re-loads its model on every request, which is
    # slow; consider caching the instance at module level. For non-English
    # input, Detoxify('multilingual') is available.
    results = Detoxify('original').predict(text)
    for category, limit in thresholds.items():
        if results.get(category, 0.0) > limit:
            raise gr.Error(f"Input text was rejected: '{category}' score exceeded the allowed threshold.")

    # Download the TTS model (ModelManager caches previously fetched models).
    model_path, config_path, model_item = manager.download_model(model_name)
    vocoder_name: Optional[str] = model_item["default_vocoder"]

    # Download the matching vocoder, if the model declares one.
    vocoder_path = None
    vocoder_config_path = None
    if vocoder_name is not None:
        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)

    # Init synthesizer and run inference. (The old ``if synthesizer is None``
    # check was unreachable: a constructor either returns an instance or
    # raises, so it has been removed.)
    synthesizer = Synthesizer(
        model_path, config_path, None, None, vocoder_path, vocoder_config_path,
    )
    wavs = synthesizer.tts(text, None)

    # Write to a named temp file; ``delete=False`` is intentional because
    # Gradio reads the file by path after this function returns.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        synthesizer.save_wav(wavs, fp)
        return fp.name
# NOTE(review): ``title`` is defined but never rendered by the layout below.
title = """<h1 align="center">๐ธ๐ฌ CoquiTTS Playground </h1>"""

with gr.Blocks(analytics_enabled=False) as demo:
    # Header: logo + news on the left, project links on the right.
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                """
## <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
"""
            )
            gr.Markdown(
                """
<br />
## ๐ธCoqui.ai News
- ๐ฃ ๐ธTTS now supports ๐ขTortoise with faster inference.
- ๐ฃ **Coqui Studio API** is landed on ๐ธTTS. - [Example](https://github.com/coqui-ai/TTS/blob/dev/README.md#-python-api)
- ๐ฃ [**Coqui Studio API**](https://docs.coqui.ai/docs) is live.
- ๐ฃ Voice generation with prompts - **Prompt to Voice** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin)!! - [Blog Post](https://coqui.ai/blog/tts/prompt-to-voice)
- ๐ฃ Voice generation with fusion - **Voice fusion** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
- ๐ฃ Voice cloning is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
<br>
"""
            )
        with gr.Column():
            gr.Markdown(
                """
<br/>
๐ป This space showcases some of the **[CoquiTTS](https://github.com/coqui-ai/TTS)** models.
<br/>
There are > 30 languages with single and multi speaker models, all thanks to our ๐ Contributors.
<br/>
Visit the links below for more.
| | |
| ------------------------------- | --------------------------------------- |
| ๐ธ๐ฌ **CoquiTTS** | [Github](https://github.com/coqui-ai/TTS) |
| ๐ผ **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/)
| ๐ฉโ๐ป **Questions** | [GitHub Discussions] |
| ๐ฏ **Community** | [](https://discord.gg/5eXr5seRrv) |
[github issue tracker]: https://github.com/coqui-ai/tts/issues
[github discussions]: https://github.com/coqui-ai/TTS/discussions
[discord]: https://discord.gg/5eXr5seRrv
"""
            )
    with gr.Row():
        gr.Markdown(
            """
<details>
<summary>๐ Model contributors</summary>
- <a href="https://github.com/nmstoker/" target="_blank">@nmstoker</a>
- <a href="https://github.com/kaiidams/" target="_blank">@kaiidams</a>
- <a href="https://github.com/WeberJulian/" target="_blank">@WeberJulian,</a>
- <a href="https://github.com/Edresson/" target="_blank">@Edresson</a>
- <a href="https://github.com/thorstenMueller/" target="_blank">@thorstenMueller</a>
- <a href="https://github.com/r-dh/" target="_blank">@r-dh</a>
- <a href="https://github.com/kirianguiller/" target="_blank">@kirianguiller</a>
- <a href="https://github.com/robinhad/" target="_blank">@robinhad</a>
- <a href="https://github.com/fkarabiber/" target="_blank">@fkarabiber</a>
- <a href="https://github.com/nicolalandro/" target="_blank">@nicolalandro</a>
- <a href="https://github.com/a-froghyar" target="_blank">@a-froghyar</a>
- <a href="https://github.com/manmay-nakhashi" target="_blank">@manmay-nakhashi</a>
- <a href="https://github.com/noml4u" target="_blank">@noml4u</a>
</details>
<br/>
"""
        )
    # Interaction row. BUG FIX: the ``gr.inputs``/``gr.outputs`` namespaces and
    # the ``default=`` kwarg were removed in modern Gradio releases and crash
    # the Space at startup; use the top-level components with ``value=``.
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Input Text",
                value="This sentence has been generated by a speech synthesis system.",
            )
            model_select = gr.Dropdown(
                label="Pick Model: tts_models/<language>/<dataset>/<model_name>",
                choices=MODEL_NAMES,
                value="tts_models/en/jenny/jenny",
            )
            tts_button = gr.Button("Send", elem_id="send-btn", visible=True)
        with gr.Column():
            output_audio = gr.Audio(label="Output", type="filepath")

    tts_button.click(
        tts,
        inputs=[
            input_text,
            model_select,
        ],
        outputs=[output_audio],
    )

# ``concurrency_count`` was removed from ``queue()`` in Gradio 4; the default
# queue configuration works on both Gradio 3 and 4.
demo.queue().launch(debug=True)