"""E2 TTS - HuggingFace Space Demo |
|
|
|
|
|
Generated at 2026-01-29T18:35:40Z from templates/space/app.py.j2. |
|
|
|
|
|
A Gradio interface for E2 TTS text-to-speech synthesis. |
|
|
""" |
|
|
|
|
|
import os
from pathlib import Path

import gradio as gr
import numpy as np

from ttsdb_e2_tts import E2TTS
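# Note: ttsdb_e2_tts is assumed to be installed in the Space environment
# (e.g. listed in requirements.txt); it provides the E2TTS wrapper used below.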
|
|
|
|
|
|
|
|
|
|
|
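# The checkpoint repo can be overridden at deploy time through the MODEL_ID
# environment variable; the model is loaded once at import and reused per request.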
MODEL_ID = os.environ.get("MODEL_ID", "ttsds/e2-tts")
model = E2TTS(model_id=MODEL_ID)
|
|
|
|
|
def synthesize( |
|
|
text: str, |
|
|
reference_audio: str, |
|
|
reference_text: str, |
|
|
language: str, |
|
|
) -> tuple[int, np.ndarray]: |
|
|
"""Synthesize speech from text. |
|
|
|
|
|
Expects `reference_audio` to be a filepath (Gradio `type="filepath"`). |
|
|
Returns (sample_rate, audio_array) as expected by Gradio. |
|
|
""" |
|
|
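    # Basic input validation; raising gr.Error surfaces the message in the Gradio UI.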
if not text or not text.strip(): |
|
|
raise gr.Error("Please enter some text to synthesize.") |
|
|
|
|
|
if not reference_audio or not os.path.exists(reference_audio): |
|
|
raise gr.Error("Please upload a reference audio file.") |
|
|
|
|
|
if not reference_text or not reference_text.strip(): |
|
|
raise gr.Error("Please enter the transcript of the reference audio.") |
|
|
|
|
|
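    # Delegate to the E2TTS wrapper, which returns the waveform and its sample rate.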
audio, sr = model.synthesize( |
|
|
text=text, |
|
|
reference_audio=reference_audio, |
|
|
text_reference=reference_text, |
|
|
language=language, |
|
|
) |
|
|
return (sr, audio) |
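

# A minimal sketch of calling synthesize() outside the UI, assuming the bundled
# examples/ref_eng.mp3 clip and its transcript are available; the values below
# are hypothetical placeholders, not part of the original app:
#
#     sr, wav = synthesize(
#         text="Hello from E2 TTS.",
#         reference_audio="examples/ref_eng.mp3",
#         reference_text="<transcript of the reference clip>",
#         language="eng",
#     )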
|
|
|
|
|
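# Mark the examples directory as static so Gradio serves the clips in place
# instead of copying them into its cache.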
gr.set_static_paths(paths=["examples"]) |
|
|
|
|
|
|
|
|
with gr.Blocks(title="E2 TTS") as demo:
|
|
|
|
|
gr.Markdown( |
|
|
""" |
|
|
# E2 TTS Text-to-Speech |
|
|
|
|
|
|
|
|
|
|
|
A fully non-autoregressive text-to-speech model that combines a masked U-Net Transformer with flow matching, cloning a voice zero-shot from a short reference recording.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
> **Note:** This demo is not affiliated with or endorsed by the original authors.
> It is provided for research and educational purposes only.
|
|
|
|
|
**Links:** [Code](https://github.com/SWivid/F5-TTS) | [Paper](https://ieeexplore.ieee.org/abstract/document/10832320) | [Weights](https://huggingface.co/SWivid/E2-TTS) |
|
|
""" |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
with gr.Column(): |
|
|
reference_audio = gr.Audio( |
|
|
label="Reference Audio", |
|
|
type="filepath", |
|
|
) |
|
|
reference_text = gr.Textbox( |
|
|
label="Reference Transcript", |
|
|
placeholder="Enter what is being said in the reference audio...", |
|
|
lines=2, |
|
|
) |
|
|
text_input = gr.Textbox( |
|
|
label="Text to Synthesize", |
|
|
placeholder="Enter the text you want to convert to speech...", |
|
|
lines=3, |
|
|
) |
|
|
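            # Dropdown choices are (label, value) pairs; the value code is what
            # gets passed to synthesize() as `language`.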
language = gr.Dropdown( |
|
|
label="Language", |
|
|
choices=[ |
|
|
|
|
|
("English", "eng"), |
|
|
|
|
|
("Chinese (中文)", "zho"), |
|
|
|
|
|
], |
|
|
value="eng", |
|
|
) |
|
|
submit_btn = gr.Button("Synthesize", variant="primary") |
|
|
|
|
|
with gr.Column(): |
|
|
output_audio = gr.Audio( |
|
|
label="Synthesized Audio", |
|
|
type="numpy", |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
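    # Build the example table at runtime: a row is only added when the bundled
    # reference clip actually ships with the Space.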
_runtime_examples = [] |
|
|
|
|
|
|
|
|
_rel = Path("examples/ref_eng.mp3") |
|
|
_src = Path(__file__).parent / _rel |
|
|
if _src.exists(): |
|
|
_runtime_examples.append([_src, "Were the leaders in this luckless change, though our own Baskerville, who was at work some years before them, went much on the same lines.", "With tenure, Suzie'd have all the more leisure for yachting, but her publications are no good.", "eng"]) |
|
|
|
|
|
|
|
|
|
|
|
_rel = Path("examples/ref_zho.wav") |
|
|
_src = Path(__file__).parent / _rel |
|
|
if _src.exists(): |
|
|
_runtime_examples.append([_src, "對,這就是我,萬人敬仰的太乙真人。雖然有點嬰兒肥,但也掩不住我,逼人的帥氣。", "視野無限廣,窗外有藍天", "zho"]) |
|
|
|
|
|
|
|
|
|
|
|
gr.Examples( |
|
|
examples=_runtime_examples, |
|
|
inputs=[reference_audio, reference_text, text_input, language], |
|
|
) |
|
|
|
|
|
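    # Wire the button to the synthesis function; the audio output expects the
    # (sample_rate, np.ndarray) tuple that synthesize() returns.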
submit_btn.click( |
|
|
fn=synthesize, |
|
|
inputs=[text_input, reference_audio, reference_text, language], |
|
|
outputs=[output_audio], |
|
|
) |
|
|
|
|
|
|
|
|
gr.Markdown( |
|
|
""" |
|
|
## Model Information |
|
|
|
|
|
| Property | Value | |
|
|
|----------|-------| |
|
|
| **Architecture** | Non-Autoregressive, Masked, Flow Matching, U-Net Transformer | |
|
|
| **Sample Rate** | 24000 Hz | |
|
|
| **Parameters** | 335M | |
|
|
| **Languages** | English, Chinese | |
|
|
| **Release Date** | 2024-10-30 | |
|
|
|
|
|
|
|
|
|
|
|
## Citations |
|
|
|
|
|
If you use this model, please cite the original work: |
|
|
|
|
|
|
|
|
```bibtex |
|
|
|
|
|
@inproceedings{e2-tts, |
|
|
title={{E2 TTS}: Embarrassingly easy fully non-autoregressive zero-shot tts}, |
|
|
author={Eskimez, Sefik Emre and Wang, Xiaofei and Thakker, Manthan and Li, Canrun and Tsai, Chung-Hsien and Xiao, Zhen and Yang, Hemin and Zhu, Zirun and Tang, Min and Tan, Xu and others}, |
|
|
booktitle={2024 IEEE Spoken Language Technology Workshop (SLT)}, |
|
|
pages={682--689}, |
|
|
year={2024}, |
|
|
organization={IEEE} |
|
|
} |
|
|
|
|
|
|
|
|
``` |
|
|
|
|
|
|
|
|
""" |
|
|
) |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
|
|
|
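    # Bind to all interfaces so the server is reachable from outside the Space
    # container; allowed_paths lets Gradio serve files under examples/.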
port = int(os.environ.get("PORT", "7860")) |
|
|
demo.launch(server_name="0.0.0.0", server_port=port, share=False, allowed_paths=["examples"]) |
|
|
|