Update tortoise space
Browse files- .gitattributes +2 -0
- Dockerfile +47 -0
- README.md +58 -8
- app.py +178 -0
- examples/ref_eng.mp3 +3 -0
- examples/ref_zho.wav +3 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
examples/ref_eng.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
examples/ref_zho.wav filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Generated at 2026-01-29T20:39:04Z from templates/space/Dockerfile.j2
#
# TorToise HuggingFace Space - Dockerfile
# Uses Docker SDK for more control over dependencies and environment

FROM python:3.10-slim

# Install system dependencies (as root); drop the apt cache afterwards to keep
# the layer small
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    git-lfs \
    libsox-dev \
    ffmpeg \
    gcc \
    build-essential \
    g++-12 \
    espeak-ng \
    && rm -rf /var/lib/apt/lists/*

# Set up user (UID 1000 as required by HuggingFace Spaces)
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Upgrade pip
RUN pip install --no-cache-dir --upgrade pip

# Install PyTorch (CPU version for Spaces - GPU available at runtime if hardware supports it)
RUN pip install --no-cache-dir torch torchaudio --index-url https://download.pytorch.org/whl/cpu

# Install the external PyPI package (if specified) and the ttsdb wrapper from PyPI
RUN pip install --no-cache-dir ttsdb_tortoise==0.1.0

# Install Gradio.
# NOTE: the requirement specifier must be quoted — an unquoted `gradio>=5.15.0`
# is parsed by the shell as `gradio` plus an output redirection to a file named
# `=5.15.0`, silently installing an unconstrained gradio version.
RUN pip install --no-cache-dir "gradio>=5.15.0"

# Copy the app
COPY --chown=user app.py $HOME/app/
# Copy packaged audio examples into the app examples directory (if present)
COPY --chown=user examples $HOME/app/examples

# Expose port (HuggingFace Spaces will map this)
EXPOSE 7860

# Run the app
CMD ["python", "app.py"]
|
README.md
CHANGED
|
@@ -1,12 +1,62 @@
|
|
| 1 |
---
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
pinned: false
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
# Generated at 2026-01-29T20:39:04Z from templates/space/README.md.j2
|
| 3 |
+
title: TorToise
|
| 4 |
+
emoji: 🎤
|
| 5 |
+
colorFrom: blue
|
| 6 |
+
colorTo: purple
|
| 7 |
+
sdk: docker
|
| 8 |
+
app_port: 7860
|
| 9 |
pinned: false
|
| 10 |
+
license: other
|
| 11 |
---
|
| 12 |
|
| 13 |
+
# TorToise Text-to-Speech
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
Tortoise TTS voice cloning model.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
## Features
|
| 20 |
+
|
| 21 |
+
- Zero-shot voice cloning
|
| 22 |
+
- Language support: English
|
| 23 |
+
- High-quality 24kHz audio output
|
| 24 |
+
|
| 25 |
+
## Usage
|
| 26 |
+
|
| 27 |
+
1. Upload a reference audio clip (3-10 seconds recommended)
|
| 28 |
+
2. Enter the transcript of the reference audio
|
| 29 |
+
3. Enter the text you want to synthesize
|
| 30 |
+
4. Select the language
|
| 31 |
+
5. Click "Synthesize"
|
| 32 |
+
|
| 33 |
+
## Model Information
|
| 34 |
+
|
| 35 |
+
- **Architecture**: Autoregressive, Diffusion, Language Modeling
|
| 36 |
+
- **Sample Rate**: 24000 Hz
|
| 37 |
+
- **Parameters**: 960M
|
| 38 |
+
|
| 39 |
+
## Citation
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
```bibtex
|
| 43 |
+
@misc{betker2023betterspeechsynthesisscaling,
|
| 44 |
+
title={Better speech synthesis through scaling},
|
| 45 |
+
author={James Betker},
|
| 46 |
+
year={2023},
|
| 47 |
+
eprint={2305.07243},
|
| 48 |
+
archivePrefix={arXiv},
|
| 49 |
+
primaryClass={cs.SD},
|
| 50 |
+
url={https://arxiv.org/abs/2305.07243},
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
## Links
|
| 57 |
+
|
| 58 |
+
- [Model Weights](https://huggingface.co/ttsds/tortoise)
|
| 59 |
+
- [Code Repository](https://github.com/neonbjb/tortoise-tts.git)
|
| 60 |
+
|
| 61 |
+
- [Paper](https://arxiv.org/abs/2305.07243)
|
| 62 |
+
|
app.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TorToise - HuggingFace Space Demo

Generated at 2026-01-29T20:39:04Z from templates/space/app.py.j2.

A Gradio interface for TorToise text-to-speech synthesis.
"""

import os
from pathlib import Path

import gradio as gr
import numpy as np

from ttsdb_tortoise import TorToise


# Initialize model (downloads weights on first run).
# MODEL_ID may be overridden via the environment for forks/mirrors.
MODEL_ID = os.environ.get("MODEL_ID", "ttsds/tortoise")
# BUG FIX: was `model = model = TorToise(...)` — a harmless but clearly
# unintended duplicated assignment from template generation.
model = TorToise(model_id=MODEL_ID)

def synthesize(
    text: str,
    reference_audio: str,
    reference_text: str,
    language: str,
) -> tuple[int, np.ndarray]:
    """Run TorToise voice-cloned synthesis and return audio for Gradio.

    `reference_audio` is a filesystem path (the Audio component uses
    `type="filepath"`). The return value is the `(sample_rate, samples)`
    pair that a numpy-typed `gr.Audio` output expects.

    Raises `gr.Error` (shown to the user in the UI) when any input is
    missing or unusable.
    """
    # Guard clauses: surface friendly messages instead of model stack traces.
    if not (text and text.strip()):
        raise gr.Error("Please enter some text to synthesize.")

    if not (reference_audio and os.path.exists(reference_audio)):
        raise gr.Error("Please upload a reference audio file.")

    if not (reference_text and reference_text.strip()):
        raise gr.Error("Please enter the transcript of the reference audio.")

    audio, sr = model.synthesize(
        text=text,
        reference_audio=reference_audio,
        text_reference=reference_text,
        language=language,
    )
    return (sr, audio)

gr.set_static_paths(paths=["examples"])

# Languages offered in the UI. Example rows below are filtered against these
# codes so an example click can never push a value the dropdown does not offer.
LANGUAGE_CHOICES = [
    ("English", "eng"),
]
SUPPORTED_LANG_CODES = {code for _, code in LANGUAGE_CHOICES}

# Build the Gradio interface
with gr.Blocks(title="TorToise TTS") as demo:
    # Header
    gr.Markdown(
        """
        # TorToise Text-to-Speech

        Tortoise TTS voice cloning model.

        > **Note:** This demo is not affiliated with or endorsed by the original authors.
        > It is provided for research and educational purposes only.

        **Links:** [Code](https://github.com/neonbjb/tortoise-tts.git) | [Paper](https://arxiv.org/abs/2305.07243) | [Weights](https://huggingface.co/jbetker/tortoise-tts-v2)
        """
    )

    with gr.Row():
        with gr.Column():
            reference_audio = gr.Audio(
                label="Reference Audio",
                type="filepath",
            )
            reference_text = gr.Textbox(
                label="Reference Transcript",
                placeholder="Enter what is being said in the reference audio...",
                lines=2,
            )
            text_input = gr.Textbox(
                label="Text to Synthesize",
                placeholder="Enter the text you want to convert to speech...",
                lines=3,
            )
            language = gr.Dropdown(
                label="Language",
                choices=LANGUAGE_CHOICES,
                value="eng",
            )
            submit_btn = gr.Button("Synthesize", variant="primary")

        with gr.Column():
            output_audio = gr.Audio(
                label="Synthesized Audio",
                type="numpy",
            )

    # Example inputs from test_data.yaml; packaged audio lives under `examples/`.
    # BUG FIX: the generated code unconditionally registered a "zho" example
    # even though the dropdown only offers "eng", so clicking it would set an
    # invalid dropdown value. Rows are now filtered to supported languages.
    _candidate_examples = [
        (
            Path("examples/ref_eng.mp3"),
            "Were the leaders in this luckless change, though our own Baskerville, who was at work some years before them, went much on the same lines.",
            "With tenure, Suzie'd have all the more leisure for yachting, but her publications are no good.",
            "eng",
        ),
        (
            Path("examples/ref_zho.wav"),
            "對,這就是我,萬人敬仰的太乙真人。雖然有點嬰兒肥,但也掩不住我,逼人的帥氣。",
            "視野無限廣,窗外有藍天",
            "zho",
        ),
    ]
    _runtime_examples = []
    for _rel, _ref_text, _gen_text, _lang in _candidate_examples:
        _src = Path(__file__).parent / _rel
        # Only expose examples that are both packaged and speakable by the UI.
        if _src.exists() and _lang in SUPPORTED_LANG_CODES:
            _runtime_examples.append([_src, _ref_text, _gen_text, _lang])

    gr.Examples(
        examples=_runtime_examples,
        inputs=[reference_audio, reference_text, text_input, language],
    )

    submit_btn.click(
        fn=synthesize,
        inputs=[text_input, reference_audio, reference_text, language],
        outputs=[output_audio],
    )

    # Footer with model information and citations
    gr.Markdown(
        """
        ## Model Information

        | Property | Value |
        |----------|-------|
        | **Architecture** | Autoregressive, Diffusion, Language Modeling |
        | **Sample Rate** | 24000 Hz |
        | **Parameters** | 960M |
        | **Languages** | English |
        | **Release Date** | 2022-05-17 |

        ## Citations

        If you use this model, please cite the original work:

        ```bibtex
        @misc{betker2023betterspeechsynthesisscaling,
            title={Better speech synthesis through scaling},
            author={James Betker},
            year={2023},
            eprint={2305.07243},
            archivePrefix={arXiv},
            primaryClass={cs.SD},
            url={https://arxiv.org/abs/2305.07243},
        }
        ```
        """
    )


if __name__ == "__main__":
    # HuggingFace Spaces exposes the service via $PORT and requires binding to 0.0.0.0
    port = int(os.environ.get("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port, share=False, allowed_paths=["examples"])
examples/ref_eng.mp3
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f26a0c66a70e53ac2e68864e69c14f6b2d7c4783d2023fb12d88a04a7ede15fa
|
| 3 |
+
size 211322
|
examples/ref_zho.wav
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:62c1171817d707b2418ea191badfc4861a0a7f1efdaaeb40d6ea715a7a6459f0
|
| 3 |
+
size 307244
|