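# Gradio demo for multilingual voice cloning with Coqui XTTS v2.
# The UI lets a user type a prompt, pick a language, provide a reference voice
# (uploaded file or microphone recording), and generate up to nine audio files.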
import sys
import os
import re
import time
import math
import torch
import random
import spaces

# By using XTTS you agree to CPML license https://coqui.ai/cpml
os.environ["COQUI_TOS_AGREED"] = "1"

import gradio as gr
from TTS.api import TTS
from TTS.utils.manage import ModelManager

max_64_bit_int = 2**63 - 1

model_names = TTS().list_models()
print(model_names.__dict__)
print(model_names.__dir__())

model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
m = model_name

# Automatic device detection
if torch.cuda.is_available():
    # CUDA GPU available
    device_type = "cuda"
    device_selection = "cuda:0"
    data_type = torch.float16
else:
    # CPU fallback (no CUDA-capable GPU, e.g. AMD without CUDA support)
    device_type = "cpu"
    device_selection = "cpu"
    data_type = torch.float32

tts = TTS(model_name, gpu=torch.cuda.is_available())
tts.to(device_type)
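
# Show or hide the extra output players so that exactly `output_number` of the
# nine audio components are visible (the first one is always shown).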
def update_output(output_number):
    return [
        gr.update(visible = (2 <= output_number)),
        gr.update(visible = (3 <= output_number)),
        gr.update(visible = (4 <= output_number)),
        gr.update(visible = (5 <= output_number)),
        gr.update(visible = (6 <= output_number)),
        gr.update(visible = (7 <= output_number)),
        gr.update(visible = (8 <= output_number)),
        gr.update(visible = (9 <= output_number))
    ]
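
# Thin wrappers that bake the generation index into `predict`; Gradio's chained
# `.success()` events below each call one of these with the same user inputs.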
def predict0(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 0, generation_number, temperature, is_randomize_seed, seed, progress)

def predict1(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 1, generation_number, temperature, is_randomize_seed, seed, progress)

def predict2(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 2, generation_number, temperature, is_randomize_seed, seed, progress)

def predict3(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 3, generation_number, temperature, is_randomize_seed, seed, progress)

def predict4(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 4, generation_number, temperature, is_randomize_seed, seed, progress)

def predict5(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 5, generation_number, temperature, is_randomize_seed, seed, progress)

def predict6(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 6, generation_number, temperature, is_randomize_seed, seed, progress)

def predict7(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 7, generation_number, temperature, is_randomize_seed, seed, progress)

def predict8(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, generation_number, temperature, is_randomize_seed, seed, progress = gr.Progress()):
    return predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic, 8, generation_number, temperature, is_randomize_seed, seed, progress)
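
# Core entry point: validates the prompt, selects the reference audio
# (microphone recording, uploaded file, or a bundled example voice),
# synthesizes one audio file, and reports the elapsed time.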
def predict(
    prompt,
    language,
    gender,
    audio_file_pth,
    mic_file_path,
    use_mic,
    i,
    generation_number,
    temperature,
    is_randomize_seed,
    seed,
    progress = gr.Progress()
):
    if generation_number <= i:
        # This output slot was not requested; leave it empty.
        return (
            None,
            None,
        )
    start = time.time()
    progress(0, desc = "Preparing data...")

    if len(prompt) < 2:
        gr.Warning("Please provide a longer text prompt")
        return (
            None,
            None,
        )
    if 50000 < len(prompt):
        gr.Warning("Text length is limited to 50,000 characters for this demo; please try a shorter text")
        return (
            None,
            None,
        )

    if use_mic:
        if mic_file_path is None:
            gr.Warning("Please record your voice with the microphone, or uncheck 'Use Microphone' to use the reference audio")
            return (
                None,
                None,
            )
        else:
            speaker_wav = mic_file_path
    else:
        speaker_wav = audio_file_pth

    if speaker_wav is None:
        # No reference audio provided: fall back to a bundled example voice.
        if gender == "male":
            speaker_wav = "./examples/male.mp3"
        else:
            speaker_wav = "./examples/female.wav"

    output_filename = f"{i + 1}_{re.sub('[^a-zA-Z0-9]', '_', language)}_{re.sub('[^a-zA-Z0-9]', '_', prompt)}"[:180] + ".wav"

    try:
        if language == "fr":
            # Some models expect a different code for French.
            if m.find("your") != -1:
                language = "fr-fr"
            if m.find("/fr/") != -1:
                language = None
        predict_on_gpu(i, generation_number, prompt, speaker_wav, language, output_filename, temperature, is_randomize_seed, seed, progress)
    except RuntimeError as e:
        if "device-assert" in str(e):
            # Nothing can be done after a CUDA device-side assert; the process must restart.
            gr.Warning("Unhandled exception encountered, please retry in a minute")
            print("CUDA device-assert runtime error encountered; restart needed")
            sys.exit("Exit due to cuda device-assert")
        else:
            raise e

    end = time.time()
    secondes = int(end - start)
    minutes = math.floor(secondes / 60)
    secondes = secondes - (minutes * 60)
    hours = math.floor(minutes / 60)
    minutes = minutes - (hours * 60)
    information = ("Start again to get a different result. " if is_randomize_seed else "") + "The sound has been generated in " + ((str(hours) + " h, ") if hours != 0 else "") + ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + str(secondes) + " sec."
    return (
        output_filename,
        information,
    )
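
# The actual synthesis is isolated in its own function so that only this part
# runs on the GPU. The otherwise unused `spaces` import above suggests the
# Hugging Face ZeroGPU decorator was intended here; it is added as an assumption.
@spaces.GPU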
def predict_on_gpu(
    i,
    generation_number,
    prompt,
    speaker_wav,
    language,
    output_filename,
    temperature,
    is_randomize_seed,
    seed,
    progress
):
    progress((i + .5) / generation_number, desc = "Generating the audio #" + str(i + 1) + "...")
    if is_randomize_seed:
        seed = random.randint(0, max_64_bit_int)

    random.seed(seed)
    torch.manual_seed(seed)

    tts.tts_to_file(
        text = prompt,
        file_path = output_filename,
        speaker_wav = speaker_wav,
        language = language,
        temperature = temperature
    )
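
# Gradio UI: a text prompt, language and voice options, advanced sampling
# options, one submit button, and up to nine output audio players.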
with gr.Blocks() as interface:
    gr.HTML(
        """
        <h1><center>XTTS</center></h1>
        <big><center>Generate long speech from text in several languages, cloning a reference voice, for free, without an account or watermark, and download it</center></big>
        <br/>
        <a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
        <br/>
        XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
        <br/>
        This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
        <br/>
        Leave a star on the GitHub repository <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
        <br/>
        <p>To avoid the queue, you can duplicate this Space on CPU, GPU or ZeroGPU hardware:
        <br/>
        <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/Multi-language_Text-to-Speech?duplicate=true">
        <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
        </p>
        """
    )
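    # Input controls: prompt, language, gender, reference audio (file or microphone), and generation count.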
    with gr.Column():
        prompt = gr.Textbox(
            label = "Text Prompt",
            info = "One or two sentences at a time works best",
            value = "Hello, World! Here is an example of light voice cloning. Try to upload your best-quality audio samples",
            elem_id = "prompt-id",
        )
        with gr.Group():
            language = gr.Dropdown(
                label="Language",
                info="Select an output language for the synthesised speech",
                choices=[
                    ["Arabic", "ar"],
                    ["Brazilian Portuguese", "pt"],
                    ["Mandarin Chinese", "zh-cn"],
                    ["Czech", "cs"],
                    ["Dutch", "nl"],
                    ["English", "en"],
                    ["French", "fr"],
                    ["German", "de"],
                    ["Italian", "it"],
                    ["Polish", "pl"],
                    ["Russian", "ru"],
                    ["Spanish", "es"],
                    ["Turkish", "tr"]
                ],
                max_choices=1,
                value="en",
                elem_id = "language-id",
            )
            gr.HTML("More languages <a href='https://huggingface.co/spaces/Brasd99/TTS-Voice-Cloner'>here</a>")
        gender = gr.Radio(
            ["female", "male"],
            label="Gender",
            info="Gender of the voice",
            elem_id = "gender-id",
        )
        audio_file_pth = gr.Audio(
            label="Reference Audio",
            #info="Click on the ✎ button to upload your own target speaker audio",
            type="filepath",
            value=None,
            elem_id = "audio-file-pth-id",
        )
        mic_file_path = gr.Audio(
            sources=["microphone"],
            type="filepath",
            #info="Use your microphone to record audio",
            label="Use Microphone for Reference",
            elem_id = "mic-file-path-id",
        )
        use_mic = gr.Checkbox(
            label = "Check to use Microphone as Reference",
            value = False,
            info = "Note: microphone input may not work properly under heavy traffic",
            elem_id = "use-mic-id",
        )
        generation_number = gr.Slider(
            minimum = 1,
            maximum = 9,
            step = 1,
            value = 1,
            label = "Generation number",
            info = "How many audio files to generate",
            elem_id = "generation-number-id"
        )
        with gr.Accordion("Advanced options", open = False):
            temperature = gr.Slider(
                minimum = 0,
                maximum = 10,
                step = .1,
                value = .75,
                label = "Temperature",
                info = "May have little effect",
                elem_id = "temperature-id"
            )
            randomize_seed = gr.Checkbox(
                label = "\U0001F3B2 Randomize seed",
                value = True,
                info = "If checked, the result is always different",
                elem_id = "randomize-seed-id"
            )
            seed = gr.Slider(
                minimum = 0,
                maximum = max_64_bit_int,
                step = 1,
                randomize = True,
                label = "Seed",
                elem_id = "seed-id"
            )

    submit = gr.Button(
        "🚀 Speak",
        variant = "primary",
        elem_id = "submit-id"
    )
    synthesised_audio_1 = gr.Audio(
        label="Synthesised Audio #1",
        autoplay = False,
        elem_id = "synthesised-audio-1-id"
    )
    synthesised_audio_2 = gr.Audio(
        label="Synthesised Audio #2",
        autoplay = False,
        elem_id = "synthesised-audio-2-id",
        visible = False
    )
    synthesised_audio_3 = gr.Audio(
        label="Synthesised Audio #3",
        autoplay = False,
        elem_id = "synthesised-audio-3-id",
        visible = False
    )
    synthesised_audio_4 = gr.Audio(
        label="Synthesised Audio #4",
        autoplay = False,
        elem_id = "synthesised-audio-4-id",
        visible = False
    )
    synthesised_audio_5 = gr.Audio(
        label="Synthesised Audio #5",
        autoplay = False,
        elem_id = "synthesised-audio-5-id",
        visible = False
    )
    synthesised_audio_6 = gr.Audio(
        label="Synthesised Audio #6",
        autoplay = False,
        elem_id = "synthesised-audio-6-id",
        visible = False
    )
    synthesised_audio_7 = gr.Audio(
        label="Synthesised Audio #7",
        autoplay = False,
        elem_id = "synthesised-audio-7-id",
        visible = False
    )
    synthesised_audio_8 = gr.Audio(
        label="Synthesised Audio #8",
        autoplay = False,
        elem_id = "synthesised-audio-8-id",
        visible = False
    )
    synthesised_audio_9 = gr.Audio(
        label="Synthesised Audio #9",
        autoplay = False,
        elem_id = "synthesised-audio-9-id",
        visible = False
    )
    information = gr.HTML()
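
    # On submit: first show the requested number of output players, then run the
    # nine generation steps in sequence; each step whose index is past
    # `generation_number` returns (None, None) and leaves its player empty.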
    submit.click(fn = update_output, inputs = [
        generation_number
    ], outputs = [
        synthesised_audio_2,
        synthesised_audio_3,
        synthesised_audio_4,
        synthesised_audio_5,
        synthesised_audio_6,
        synthesised_audio_7,
        synthesised_audio_8,
        synthesised_audio_9
    ], queue = False, show_progress = False).success(predict0, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_1,
        information
    ], scroll_to_output = True).success(predict1, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_2,
        information
    ], scroll_to_output = True).success(predict2, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_3,
        information
    ], scroll_to_output = True).success(predict3, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_4,
        information
    ], scroll_to_output = True).success(predict4, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_5,
        information
    ], scroll_to_output = True).success(predict5, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_6,
        information
    ], scroll_to_output = True).success(predict6, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_7,
        information
    ], scroll_to_output = True).success(predict7, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_8,
        information
    ], scroll_to_output = True).success(predict8, inputs = [
        prompt,
        language,
        gender,
        audio_file_pth,
        mic_file_path,
        use_mic,
        generation_number,
        temperature,
        randomize_seed,
        seed
    ], outputs = [
        synthesised_audio_9,
        information
    ], scroll_to_output = True)

interface.queue(max_size = 5).launch(debug = True)
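
# A minimal sketch of calling the model directly, without the UI (assumes the
# same TTS API used above and a reference clip such as examples/female.wav):
#     from TTS.api import TTS
#     tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2")
#     tts.tts_to_file(text = "Hello, World!", speaker_wav = "examples/female.wav", language = "en", file_path = "hello.wav")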