Spaces:
Sleeping
Sleeping
update app.py
Browse files
app.py
CHANGED
|
@@ -5,42 +5,56 @@ import torch
|
|
| 5 |
import gradio as gr
|
| 6 |
from TTS.api import TTS
|
| 7 |
|
|
|
|
| 8 |
old_torch_load = torch.load
|
| 9 |
def patched_torch_load(*args, **kwargs):
|
| 10 |
kwargs["weights_only"] = False
|
| 11 |
return old_torch_load(*args, **kwargs)
|
| 12 |
torch.load = patched_torch_load
|
| 13 |
|
|
|
|
| 14 |
os.environ["COQUI_TOS_AGREED"] = "1"
|
| 15 |
|
|
|
|
| 16 |
MODEL = "tts_models/multilingual/multi-dataset/xtts_v2"
|
| 17 |
-
|
| 18 |
print("Loading model:", MODEL)
|
| 19 |
|
|
|
|
| 20 |
use_gpu = torch.cuda.is_available()
|
| 21 |
tts = TTS(MODEL, gpu=use_gpu)
|
| 22 |
|
|
|
|
| 23 |
LANGS = [
|
| 24 |
"en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl",
|
| 25 |
"cs", "ar", "zh-cn", "ja", "ko", "hu", "hi"
|
| 26 |
]
|
| 27 |
|
|
|
|
| 28 |
DEFAULT_SPEAKER = tts.speakers[0] if tts.speakers else None
|
| 29 |
print("Default speaker:", DEFAULT_SPEAKER)
|
| 30 |
|
| 31 |
-
_ = tts.tts("Hello world, the model is ready!", language="en", speaker=DEFAULT_SPEAKER)
|
| 32 |
-
|
| 33 |
def generate_audio(text, language, speaker_wav):
|
| 34 |
if not text or len(text.strip()) < 2:
|
| 35 |
return None
|
| 36 |
|
| 37 |
out_path = tempfile.mktemp(suffix=".wav")
|
| 38 |
-
speaker_path = speaker_wav if speaker_wav else None
|
| 39 |
|
| 40 |
-
if
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
else:
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
return out_path
|
| 46 |
|
|
@@ -52,7 +66,7 @@ demo = gr.Interface(
|
|
| 52 |
gr.Audio(label="Speaker reference (optional, WAV)", type="filepath")
|
| 53 |
],
|
| 54 |
outputs=gr.Audio(type="filepath", label="Generated speech"),
|
| 55 |
-
title="XTTS-v2 (
|
| 56 |
allow_flagging="never",
|
| 57 |
)
|
| 58 |
|
|
|
|
| 5 |
import gradio as gr
|
| 6 |
from TTS.api import TTS
|
| 7 |
|
| 8 |
# XTTS checkpoints contain pickled Python objects, so they cannot be loaded
# under torch.load's safe weights_only=True default.  Keep a reference to the
# original loader and install a wrapper that forces weights_only=False.
# NOTE(review): this disables PyTorch's safe-unpickling guard for EVERY
# torch.load call in the process — acceptable here only because the model
# files come from the trusted Coqui hub; confirm no untrusted checkpoints
# are ever loaded in this app.
old_torch_load = torch.load


def patched_torch_load(*args, **kwargs):
    """Forward to the original ``torch.load`` with ``weights_only=False``."""
    kwargs["weights_only"] = False
    return old_torch_load(*args, **kwargs)


torch.load = patched_torch_load
|
| 14 |
|
| 15 |
+
# Accept Coqui TOS
|
| 16 |
os.environ["COQUI_TOS_AGREED"] = "1"
|
| 17 |
|
| 18 |
+
# Model name
|
| 19 |
MODEL = "tts_models/multilingual/multi-dataset/xtts_v2"
|
|
|
|
| 20 |
print("Loading model:", MODEL)
|
| 21 |
|
| 22 |
+
# Detect if GPU available (Hugging Face ZeroGPU = CPU only)
|
| 23 |
use_gpu = torch.cuda.is_available()
|
| 24 |
tts = TTS(MODEL, gpu=use_gpu)
|
| 25 |
|
| 26 |
+
# Supported languages
|
| 27 |
LANGS = [
|
| 28 |
"en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl",
|
| 29 |
"cs", "ar", "zh-cn", "ja", "ko", "hu", "hi"
|
| 30 |
]
|
| 31 |
|
| 32 |
+
# Default speaker if no wav provided
|
| 33 |
DEFAULT_SPEAKER = tts.speakers[0] if tts.speakers else None
|
| 34 |
print("Default speaker:", DEFAULT_SPEAKER)
|
| 35 |
|
|
|
|
|
|
|
def generate_audio(text, language, speaker_wav):
    """Synthesize speech for *text* and return the path of the output WAV.

    Args:
        text: Text to speak. Inputs that are empty or shorter than two
            characters after stripping whitespace are rejected.
        language: XTTS language code (e.g. ``"en"`` — see ``LANGS``).
        speaker_wav: Optional filesystem path to a reference WAV used for
            voice cloning; when falsy, the module-level ``DEFAULT_SPEAKER``
            voice is used instead.

    Returns:
        Path to the generated ``.wav`` file, or ``None`` for unusable input.
    """
    if not text or len(text.strip()) < 2:
        return None

    # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp:
    # the file is created atomically.  Only the name is needed (tts_to_file
    # reopens it), so close our descriptor immediately.
    fd, out_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)

    # The two synthesis paths differ only in how the voice is selected:
    # clone from the reference WAV when one is given, otherwise use the
    # model's built-in default speaker.
    if speaker_wav:
        voice = {"speaker_wav": speaker_wav}
    else:
        voice = {"speaker": DEFAULT_SPEAKER}
    tts.tts_to_file(text=text, language=language, file_path=out_path, **voice)

    return out_path
|
| 60 |
|
|
|
|
| 66 |
gr.Audio(label="Speaker reference (optional, WAV)", type="filepath")
|
| 67 |
],
|
| 68 |
outputs=gr.Audio(type="filepath", label="Generated speech"),
|
| 69 |
+
title="XTTS-v2 (Multilingual + Voice Cloning)",
|
| 70 |
allow_flagging="never",
|
| 71 |
)
|
| 72 |
|