Spaces:
Sleeping
Sleeping
Delete app.py
Browse files
app.py
DELETED
|
@@ -1,182 +0,0 @@
|
|
| 1 |
-
import tempfile
|
| 2 |
-
from typing import Optional
|
| 3 |
-
from TTS.config import load_config
|
| 4 |
-
import gradio as gr
|
| 5 |
-
import numpy as np
|
| 6 |
-
from TTS.utils.manage import ModelManager
|
| 7 |
-
from TTS.utils.synthesizer import Synthesizer
|
| 8 |
-
|
| 9 |
-
# Manager subclass that auto-confirms the terms-of-service prompt so that
# model downloads never block waiting for interactive keyboard input
# (required when running headless, e.g. inside a Hugging Face Space).
class CustomModelManager(ModelManager):
    def ask_tos(self, output_path):
        """Print the license notice and accept the terms without prompting.

        The signature matches ModelManager.ask_tos; `output_path` is accepted
        but not used because no interactive confirmation is recorded.
        """
        notice_lines = (
            "This sentence has been generated by a speech synthesis system. tts_models/multilingual/multi-dataset/xtts_v1.1",
            " > You must confirm the following:",
            ' | > "I have purchased a commercial license from Coqui: licensing@coqui.ai"',
            ' | > "Otherwise, I agree to the terms of the non-commercial CPML: https://coqui.ai/cpml" - [y/n]',
        )
        for line in notice_lines:
            print(line)
        # Answer "yes" on the user's behalf; the original computed
        # 'y'.lower() == 'y', which is always True.
        return True
|
| 18 |
-
|
| 19 |
-
# Replace the default manager with the auto-accepting one.
manager = CustomModelManager()
MODELS = {}
SPEAKERS = {}
# Hard cap on input length to keep synthesis latency bounded per request.
MAX_TXT_LEN = 100

MODEL_NAMES = manager.list_tts_models()

# filter out multi-speaker models and slow wavegrad vocoders
filters = ["vctk", "your_tts", "ek1"]
MODEL_NAMES = [model_name for model_name in MODEL_NAMES if not any(f in model_name for f in filters)]

# Reorder models: list English models first, promoting the entry at index 5
# to the top of the English group (presumably the preferred default model --
# confirm against the published model list).
EN = [el for el in MODEL_NAMES if "/en/" in el]
OTHER = [el for el in MODEL_NAMES if "/en/" not in el]
# Guard the swap: the upstream model list can shrink between TTS releases,
# and an unconditional EN[5] access would raise IndexError at import time.
if len(EN) > 5:
    EN[0], EN[5] = EN[5], EN[0]
MODEL_NAMES = EN + OTHER

print(MODEL_NAMES)
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
def tts(text: str, model_name: str) -> str:
    """Synthesize `text` with the selected model and return a WAV file path.

    Args:
        text: Input text; silently truncated to MAX_TXT_LEN characters.
        model_name: TTS model id of the form tts_models/<lang>/<dataset>/<name>.

    Returns:
        Path to a temporary .wav file holding the rendered audio.

    Raises:
        ValueError: If the model download did not yield both a model path
            and a config path.
    """
    if len(text) > MAX_TXT_LEN:
        text = text[:MAX_TXT_LEN]
        print(f"Input text was cutoff since it went over the {MAX_TXT_LEN} character limit.")
    print(text, model_name)

    # download model (the manager caches downloads after the first fetch)
    model_path, config_path, model_item = manager.download_model(model_name)
    print(f"Model path: {model_path}")
    print(f"Config path: {config_path}")
    print(f"Model item: {model_item}")

    # download the matching vocoder, if the model declares a default one
    vocoder_name: Optional[str] = model_item["default_vocoder"]
    vocoder_path = None
    vocoder_config_path = None
    if vocoder_name is not None:
        vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)

    # init synthesizer
    if model_path is None or config_path is None:
        raise ValueError("Model path or config path is None")

    synthesizer = Synthesizer(
        model_path, config_path, None, None, vocoder_path, vocoder_config_path,
    )
    # NOTE: the former `if synthesizer is None: raise NameError(...)` check
    # was unreachable -- the constructor call above either returns an
    # instance or raises -- so it has been removed.

    # synthesize
    wavs = synthesizer.tts(text, None)

    # Write audio to a temp file that outlives this call; Gradio serves the
    # result by file path, hence delete=False.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        synthesizer.save_wav(wavs, fp)
        return fp.name
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
# Page banner markup.
# NOTE(review): `title` is defined but never referenced below -- presumably
# intended as a header element; confirm before removing.
title = """<h1 align="center">🐸💬 CoquiTTS Playground </h1>"""

# Build the Gradio UI: a header row (logo + news, then project links), a
# contributors row, and an input/output row wired to tts().
with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column():
            # Coqui logo.
            gr.Markdown(
                """
                ## <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
                """
            )
            # Static news feed.
            gr.Markdown(
                """
                <br />

                ## 🐸Coqui.ai News
                - 📣 ⓍTTS, our production TTS model that can speak 13 languages, is released [Blog Post](https://coqui.ai/blog/tts/open_xtts), [Demo](https://huggingface.co/spaces/coqui/xtts), [Docs](https://tts.readthedocs.io/en/dev/models/xtts.html)
                - 📣 [🐶Bark](https://github.com/suno-ai/bark) is now available for inference with unconstrained voice cloning. [Docs](https://tts.readthedocs.io/en/dev/models/bark.html)
                - 📣 You can use [~1100 Fairseq models](https://github.com/facebookresearch/fairseq/tree/main/examples/mms) with 🐸TTS.
                - 📣 🐸TTS now supports 🐢Tortoise with faster inference. [Docs](https://tts.readthedocs.io/en/dev/models/tortoise.html)
                - 📣 **Coqui Studio API** is landed on 🐸TTS. - [Example](https://github.com/coqui-ai/TTS/blob/dev/README.md#-python-api)
                - 📣 [**Coqui Studio API**](https://docs.coqui.ai/docs) is live.
                - 📣 Voice generation with prompts - **Prompt to Voice** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin)!! - [Blog Post](https://coqui.ai/blog/tts/prompt-to-voice)
                - 📣 Voice generation with fusion - **Voice fusion** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
                - 📣 Voice cloning is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
                <br>

                """
            )
        with gr.Column():
            # Project description and external links.
            gr.Markdown(
                """
                <br/>

                💻 This space showcases some of the **[CoquiTTS](https://github.com/coqui-ai/TTS)** models.

                <br/>

                There are > 30 languages with single and multi speaker models, all thanks to our 👑 Contributors.

                <br/>

                Visit the links below for more.

                | | |
                | ------------------------------- | --------------------------------------- |
                | 🐸💬 **CoquiTTS** | [Github](https://github.com/coqui-ai/TTS) |
                | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/)
                | 👩💻 **Questions** | [GitHub Discussions] |
                | 🗯 **Community** | [](https://discord.gg/5eXr5seRrv) |

                [github issue tracker]: https://github.com/coqui-ai/tts/issues
                [github discussions]: https://github.com/coqui-ai/TTS/discussions
                [discord]: https://discord.gg/5eXr5seRrv


                """
            )

    with gr.Row():
        # Collapsible list of model contributors.
        gr.Markdown(
            """
            <details>
            <summary>👑 Model contributors</summary>

            - <a href="https://github.com/nmstoker/" target="_blank">@nmstoker</a>
            - <a href="https://github.com/kaiidams/" target="_blank">@kaiidams</a>
            - <a href="https://github.com/WeberJulian/" target="_blank">@WeberJulian,</a>
            - <a href="https://github.com/Edresson/" target="_blank">@Edresson</a>
            - <a href="https://github.com/thorstenMueller/" target="_blank">@thorstenMueller</a>
            - <a href="https://github.com/r-dh/" target="_blank">@r-dh</a>
            - <a href="https://github.com/kirianguiller/" target="_blank">@kirianguiller</a>
            - <a href="https://github.com/robinhad/" target="_blank">@robinhad</a>
            - <a href="https://github.com/fkarabiber/" target="_blank">@fkarabiber</a>
            - <a href="https://github.com/nicolalandro/" target="_blank">@nicolalandro</a>
            - <a href="https://github.com/a-froghyar" target="_blank">@a-froghyar</a>
            - <a href="https://github.com/manmay-nakhashi" target="_blank">@manmay-nakhashi</a>
            - <a href="https://github.com/noml4u" target="_blank">@noml4u</a>
            </details>

            <br/>
            """
        )

    with gr.Row():
        with gr.Column():
            # NOTE(review): gr.inputs.* / gr.outputs.* are the legacy Gradio
            # namespaces (deprecated in 3.x, removed in 4.x) -- confirm the
            # pinned gradio version before migrating to gr.Textbox /
            # gr.Dropdown / gr.Audio with `value=` instead of `default=`.
            input_text = gr.inputs.Textbox(
                label="Input Text",
                default="This sentence has been generated by a speech synthesis system.",
            )
            model_select = gr.inputs.Dropdown(
                label="Pick Model: tts_models/<language>/<dataset>/<model_name>",
                choices=MODEL_NAMES,
                default="tts_models/en/jenny/jenny"
            )
            tts_button = gr.Button("Send", elem_id="send-btn", visible=True)

        with gr.Column():
            output_audio = gr.outputs.Audio(label="Output", type="filepath")

    # Wire the button: tts(text, model_name) -> path to a .wav file,
    # played back via the file-path audio component above.
    tts_button.click(
        tts,
        inputs=[
            input_text,
            model_select,
        ],
        outputs=[output_audio],
    )

# Serve with a request queue; debug=True keeps the process attached for logs.
# NOTE(review): queue(concurrency_count=...) was removed in Gradio 4 --
# pin gradio<4 or migrate to default_concurrency_limit when upgrading.
demo.queue(concurrency_count=16).launch(debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|