import gc import json import re import tempfile import os from collections import OrderedDict from importlib.resources import files import click import gradio as gr import numpy as np import soundfile as sf import torch import torchaudio from cached_path import cached_path from transformers import AutoModelForCausalLM, AutoTokenizer try: import spaces USING_SPACES = True except ImportError: USING_SPACES = False def gpu_decorator(func): if USING_SPACES: return spaces.GPU(func) else: return func from f5_tts.model import DiT, UNetT from f5_tts.infer.utils_infer import ( load_vocoder, load_model, preprocess_ref_audio_text, infer_process, save_spectrogram, ) # Проверка наличия remove_silence_for_generated_wav try: from f5_tts.infer.utils_infer import remove_silence_for_generated_wav except ImportError: print("Warning: remove_silence_for_generated_wav not found in f5_tts.infer.utils_infer. Silence removal disabled.") def remove_silence_for_generated_wav(wav_path): print(f"Silence removal not available for {wav_path}. Returning original audio.") return wav_path # Инициализация моделей device = "cuda" if torch.cuda.is_available() else "cpu" DEFAULT_TTS_MODEL = "F5-TTS_v1" tts_model_choice = DEFAULT_TTS_MODEL # Конфигурация для F5-TTS (английский/китайский) DEFAULT_TTS_MODEL_CFG = [ "hf://SWivid/F5-TTS/F5TTS_v1_Base/model_1250000.safetensors", "hf://SWivid/F5-TTS/F5TTS_v1_Base/vocab.txt", json.dumps(dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)), ] # Конфигурация для F5-TTS_RUSSIAN (русский) RUSSIAN_TTS_MODEL_CFG = [ "hf://Misha24-10/F5-TTS_RUSSIAN/F5TTS_v1_Base/model_240000_inference.safetensors", "hf://Misha24-10/F5-TTS_RUSSIAN/vocab.txt", json.dumps(dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)), ] # Загрузка моделей vocoder = load_vocoder() def load_f5tts(): ckpt_path = str(cached_path(DEFAULT_TTS_MODEL_CFG[0])) F5TTS_model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2]) return load_model(DiT, F5TTS_model_cfg, ckpt_path) def load_f5tts_russian(): ckpt_path = str(cached_path(RUSSIAN_TTS_MODEL_CFG[0])) F5TTS_model_cfg = json.loads(RUSSIAN_TTS_MODEL_CFG[2]) return load_model(DiT, F5TTS_model_cfg, ckpt_path) def load_e2tts(): ckpt_path = str(cached_path("hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.safetensors")) E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4, text_mask_padding=False, pe_attn_head=1) return load_model(UNetT, E2TTS_model_cfg, ckpt_path) def load_custom(ckpt_path: str, vocab_path="", model_cfg=None): ckpt_path, vocab_path = ckpt_path.strip(), vocab_path.strip() if ckpt_path.startswith("hf://"): ckpt_path = str(cached_path(ckpt_path)) if vocab_path.startswith("hf://"): vocab_path = str(cached_path(vocab_path)) if model_cfg is None: model_cfg = json.loads(DEFAULT_TTS_MODEL_CFG[2]) return load_model(DiT, model_cfg, ckpt_path, vocab_file=vocab_path) try: F5TTS_ema_model = load_f5tts() print("F5-TTS (English/Chinese) loaded successfully.") except Exception as e: print(f"Failed to load F5-TTS: {str(e)}") F5TTS_ema_model = None try: F5TTS_russian_model = load_f5tts_russian() print("F5-TTS_RUSSIAN loaded successfully.") except Exception as e: print(f"Failed to load F5-TTS_RUSSIAN: {str(e)}") F5TTS_russian_model = None E2TTS_ema_model = load_e2tts() if USING_SPACES else None custom_ema_model, pre_custom_path = None, "" chat_model_state = None chat_tokenizer_state = None @gpu_decorator def generate_response(messages, model, tokenizer): """Generate response using Qwen""" text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, ) model_inputs = tokenizer([text], return_tensors="pt").to(model.device) generated_ids = model.generate( **model_inputs, max_new_tokens=512, temperature=0.7, top_p=0.95, ) generated_ids = [ output_ids[len(input_ids) :] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] @gpu_decorator def infer( ref_audio_orig, ref_text, gen_text, model, remove_silence, cross_fade_duration=0.15, nfe_step=32, speed=1, language="en", show_info=gr.Info, ): if not ref_audio_orig: gr.Warning("Please provide reference audio.") return None, None, ref_text if not gen_text.strip(): gr.Warning("Please enter text to generate.") return None, None, ref_text ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, ref_text, show_info=show_info) # Выбор модели в зависимости от языка if language == "ru": if F5TTS_russian_model is None: gr.Warning("F5-TTS_RUSSIAN model failed to load. Cannot generate Russian audio.") return None, None, ref_text ema_model = F5TTS_russian_model else: if model == DEFAULT_TTS_MODEL: if F5TTS_ema_model is None: gr.Warning("F5-TTS model failed to load.") return None, None, ref_text ema_model = F5TTS_ema_model elif model == "E2-TTS": global E2TTS_ema_model if E2TTS_ema_model is None: show_info("Loading E2-TTS model...") E2TTS_ema_model = load_e2tts() ema_model = E2TTS_ema_model elif isinstance(model, list) and model[0] == "Custom": assert not USING_SPACES, "Only official checkpoints allowed in Spaces." global custom_ema_model, pre_custom_path if pre_custom_path != model[1]: show_info("Loading Custom TTS model...") custom_ema_model = load_custom(model[1], vocab_path=model[2], model_cfg=model[3]) pre_custom_path = model[1] ema_model = custom_ema_model final_wave, final_sample_rate, combined_spectrogram = infer_process( ref_audio, ref_text, gen_text, ema_model, vocoder, cross_fade_duration=cross_fade_duration, nfe_step=nfe_step, speed=speed, show_info=show_info, progress=gr.Progress(), ) if remove_silence: with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f: sf.write(f.name, final_wave, final_sample_rate) remove_silence_for_generated_wav(f.name) final_wave, _ = torchaudio.load(f.name) final_wave = final_wave.squeeze().cpu().numpy() with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram: spectrogram_path = tmp_spectrogram.name save_spectrogram(combined_spectrogram, spectrogram_path) return (final_sample_rate, final_wave), spectrogram_path, ref_text with gr.Blocks() as app_credits: gr.Markdown(""" # Credits * [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS) * [RootingInLoad](https://github.com/RootingInLoad) for initial chunk generation and podcast app exploration * [jpgallegoar](https://github.com/jpgallegoar) for multiple speech-type generation & voice chat * [Misha24-10](https://huggingface.co/Misha24-10) for the [F5-TTS_RUSSIAN](https://huggingface.co/Misha24-10/F5-TTS_RUSSIAN) model """) with gr.Blocks() as app_tts: gr.Markdown("# Batched TTS") ref_audio_input = gr.Audio(label="Reference Audio", type="filepath") gen_text_input = gr.Textbox(label="Text to Generate", lines=10) language_input = gr.Dropdown(choices=["ru", "en", "zh"], label="Language", value="en") generate_btn = gr.Button("Synthesize", variant="primary") with gr.Accordion("Advanced Settings", open=False): ref_text_input = gr.Textbox( label="Reference Text", info="Leave blank to automatically transcribe the reference audio. If you enter text it will override automatic transcription.", lines=2, ) remove_silence = gr.Checkbox( label="Remove Silences", info="The model tends to produce silences, especially on longer audio. We can manually remove silences if needed. Note that this is an experimental feature and may produce strange results. This will also increase generation time.", value=False, ) speed_slider = gr.Slider( label="Speed", minimum=0.3, maximum=2.0, value=1.0, step=0.1, info="Adjust the speed of the audio.", ) nfe_slider = gr.Slider( label="NFE Steps", minimum=4, maximum=64, value=32, step=2, info="Set the number of denoising steps.", ) cross_fade_duration_slider = gr.Slider( label="Cross-Fade Duration (s)", minimum=0.0, maximum=1.0, value=0.15, step=0.01, info="Set the duration of the cross-fade between audio clips.", ) audio_output = gr.Audio(label="Synthesized Audio") spectrogram_output = gr.Image(label="Spectrogram") @gpu_decorator def basic_tts( ref_audio_input, ref_text_input, gen_text_input, language_input, remove_silence, cross_fade_duration_slider, nfe_slider, speed_slider, ): audio_out, spectrogram_path, ref_text_out = infer( ref_audio_input, ref_text_input, gen_text_input, tts_model_choice, remove_silence, cross_fade_duration=cross_fade_duration_slider, nfe_step=nfe_slider, speed=speed_slider, language=language_input, ) return audio_out, spectrogram_path, ref_text_out generate_btn.click( basic_tts, inputs=[ ref_audio_input, ref_text_input, gen_text_input, language_input, remove_silence, cross_fade_duration_slider, nfe_slider, speed_slider, ], outputs=[audio_output, spectrogram_output, ref_text_input], ) def parse_speechtypes_text(gen_text): pattern = r"\{(.*?)\}" tokens = re.split(pattern, gen_text) segments = [] current_style = "Regular" for i in range(len(tokens)): if i % 2 == 0: text = tokens[i].strip() if text: segments.append({"style": current_style, "text": text}) else: style = tokens[i].strip() current_style = style return segments with gr.Blocks() as app_multistyle: gr.Markdown( """ # Multiple Speech-Type Generation This section allows you to generate multiple speech types or multiple people's voices. Enter your text in the format shown below, and the system will generate speech using the appropriate type. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified. """ ) with gr.Row(): gr.Markdown( """ **Example Input:** {Regular} Hello, I'd like to order a sandwich please. {Surprised} What do you mean you're out of bread? {Sad} I really wanted a sandwich though... {Angry} You know what, darn you and your little shop! {Whisper} I'll just go back home and cry now. {Shouting} Why me?! """ ) gr.Markdown( """ **Example Input 2:** {Speaker1_Happy} Привет, я хочу заказать пиццу. {Speaker2_Regular} Sorry, we're out of pepperoni. {Speaker1_Sad} Я так хотел пиццу... {Speaker2_Whisper} I'll give you the last one I was hiding. """ ) gr.Markdown( "Upload different audio clips for each speech type. The first speech type is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button." ) with gr.Row() as regular_row: with gr.Column(): regular_name = gr.Textbox(value="Regular", label="Speech Type Name") regular_insert = gr.Button("Insert Label", variant="secondary") regular_audio = gr.Audio(label="Regular Reference Audio", type="filepath") regular_ref_text = gr.Textbox(label="Reference Text (Regular)", lines=2) regular_language = gr.Dropdown(choices=["ru", "en", "zh"], label="Language", value="en") max_speech_types = 100 speech_type_rows = [regular_row] speech_type_names = [regular_name] speech_type_audios = [regular_audio] speech_type_ref_texts = [regular_ref_text] speech_type_languages = [regular_language] speech_type_delete_btns = [None] speech_type_insert_btns = [regular_insert] for i in range(max_speech_types - 1): with gr.Row(visible=False) as row: with gr.Column(): name_input = gr.Textbox(label="Speech Type Name") delete_btn = gr.Button("Delete Type", variant="secondary") insert_btn = gr.Button("Insert Label", variant="secondary") audio_input = gr.Audio(label="Reference Audio", type="filepath") ref_text_input = gr.Textbox(label="Reference Text", lines=2) language_input = gr.Dropdown(choices=["ru", "en", "zh"], label="Language", value="en") speech_type_rows.append(row) speech_type_names.append(name_input) speech_type_audios.append(audio_input) speech_type_ref_texts.append(ref_text_input) speech_type_languages.append(language_input) speech_type_delete_btns.append(delete_btn) speech_type_insert_btns.append(insert_btn) add_speech_type_btn = gr.Button("Add Speech Type") speech_type_count = 1 def add_speech_type_fn(): row_updates = [gr.update() for _ in range(max_speech_types)] global speech_type_count if speech_type_count < max_speech_types: row_updates[speech_type_count] = gr.update(visible=True) speech_type_count += 1 else: gr.Warning("Exhausted maximum number of speech types. Consider restart the app.") return row_updates add_speech_type_btn.click(add_speech_type_fn, outputs=speech_type_rows) def delete_speech_type_fn(): return gr.update(visible=False), None, None, None, None for i in range(1, len(speech_type_delete_btns)): speech_type_delete_btns[i].click( delete_speech_type_fn, outputs=[speech_type_rows[i], speech_type_names[i], speech_type_audios[i], speech_type_ref_texts[i], speech_type_languages[i]], ) gen_text_input_multistyle = gr.Textbox( label="Text to Generate", lines=10, placeholder="Enter the script with speaker names (or emotion types) at the start of each block, e.g.:\n\n{Regular} Привет, я хочу заказать пиццу.\n{Surprised} What, no pepperoni?\n{Sad} Я так хотел пиццу...\n{Angry} Ты что, шутишь?\n{Whisper} Ладно, я уйду.\n{Shouting} Почему так всегда?!", ) def make_insert_speech_type_fn(index): def insert_speech_type_fn(current_text, speech_type_name): current_text = current_text or "" speech_type_name = speech_type_name or "None" updated_text = current_text + f"{{{speech_type_name}}} " return updated_text return insert_speech_type_fn for i, insert_btn in enumerate(speech_type_insert_btns): insert_fn = make_insert_speech_type_fn(i) insert_btn.click( insert_fn, inputs=[gen_text_input_multistyle, speech_type_names[i]], outputs=gen_text_input_multistyle, ) with gr.Accordion("Advanced Settings", open=False): remove_silence_multistyle = gr.Checkbox(label="Remove Silences", value=True) generate_multistyle_btn = gr.Button("Generate Multi-Style Speech", variant="primary") audio_output_multistyle = gr.Audio(label="Synthesized Audio") @gpu_decorator def generate_multistyle_speech(gen_text, *args): speech_type_names_list = args[:max_speech_types] speech_type_audios_list = args[max_speech_types : 2 * max_speech_types] speech_type_ref_texts_list = args[2 * max_speech_types : 3 * max_speech_types] speech_type_languages_list = args[3 * max_speech_types : 4 * max_speech_types] remove_silence = args[4 * max_speech_types] speech_types = OrderedDict() ref_text_idx = 0 for name_input, audio_input, ref_text_input, language_input in zip( speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list, speech_type_languages_list ): if name_input and audio_input: speech_types[name_input] = {"audio": audio_input, "ref_text": ref_text_input, "language": language_input} else: speech_types[f"@{ref_text_idx}@"] = {"audio": "", "ref_text": "", "language": "en"} ref_text_idx += 1 segments = parse_speechtypes_text(gen_text) generated_audio_segments = [] current_style = "Regular" for segment in segments: style = segment["style"] text = segment["text"] if style in speech_types: current_style = style else: gr.Warning(f"Type {style} is not available, will use Regular as default.") current_style = "Regular" try: ref_audio = speech_types[current_style]["audio"] language = speech_types[current_style]["language"] except KeyError: gr.Warning(f"Please provide reference audio for type {current_style}.") return [None] + [speech_types[style]["ref_text"] for style in speech_types] ref_text = speech_types[current_style].get("ref_text", "") audio_out, _, ref_text_out = infer( ref_audio, ref_text, text, tts_model_choice, remove_silence, 0, language=language, show_info=print ) if audio_out is None: continue sr, audio_data = audio_out generated_audio_segments.append(audio_data) speech_types[current_style]["ref_text"] = ref_text_out if generated_audio_segments: final_audio_data = np.concatenate(generated_audio_segments) return [(sr, final_audio_data)] + [speech_types[style]["ref_text"] for style in speech_types] else: gr.Warning("No audio generated.") return [None] + [speech_types[style]["ref_text"] for style in speech_types] generate_multistyle_btn.click( generate_multistyle_speech, inputs=[ gen_text_input_multistyle, ] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_languages + [remove_silence_multistyle], outputs=[audio_output_multistyle] + speech_type_ref_texts, ) def validate_speech_types(gen_text, regular_name, *args): speech_type_names_list = args[:max_speech_types] speech_types_available = set() if regular_name: speech_types_available.add(regular_name) for name_input in speech_type_names_list: if name_input: speech_types_available.add(name_input) segments = parse_speechtypes_text(gen_text) speech_types_in_text = set(segment["style"] for segment in segments) missing_speech_types = speech_types_in_text - speech_types_available if missing_speech_types: return gr.update(interactive=False) else: return gr.update(interactive=True) gen_text_input_multistyle.change( validate_speech_types, inputs=[gen_text_input_multistyle, regular_name] + speech_type_names, outputs=generate_multistyle_btn, ) with gr.Blocks() as app_chat: gr.Markdown( """ # Voice Chat Have a conversation with an AI using your reference voice! 1. Upload a reference audio clip and optionally its transcript. 2. Load the chat model. 3. Record your message through your microphone. 4. The AI will respond using the reference voice. """ ) chat_model_name_list = [ "Qwen/Qwen2.5-3B-Instruct", "microsoft/Phi-4-mini-instruct", ] @gpu_decorator def load_chat_model(chat_model_name): show_info = gr.Info global chat_model_state, chat_tokenizer_state if chat_model_state is not None: chat_model_state = None chat_tokenizer_state = None gc.collect() torch.cuda.empty_cache() show_info(f"Loading chat model: {chat_model_name}") chat_model_state = AutoModelForCausalLM.from_pretrained(chat_model_name, torch_dtype="auto", device_map="auto") chat_tokenizer_state = AutoTokenizer.from_pretrained(chat_model_name) show_info(f"Chat model {chat_model_name} loaded successfully!") return gr.update(visible=False), gr.update(visible=True) if USING_SPACES: load_chat_model(chat_model_name_list[0]) chat_model_name_input = gr.Dropdown( choices=chat_model_name_list, value=chat_model_name_list[0], label="Chat Model Name", info="Enter the name of a HuggingFace chat model", allow_custom_value=not USING_SPACES, ) load_chat_model_btn = gr.Button("Load Chat Model", variant="primary", visible=not USING_SPACES) chat_interface_container = gr.Column(visible=USING_SPACES) chat_model_name_input.change( lambda: gr.update(visible=True), None, load_chat_model_btn, show_progress="hidden", ) load_chat_model_btn.click( load_chat_model, inputs=[chat_model_name_input], outputs=[load_chat_model_btn, chat_interface_container] ) with chat_interface_container: with gr.Row(): with gr.Column(): ref_audio_chat = gr.Audio(label="Reference Audio", type="filepath") language_chat = gr.Dropdown(choices=["ru", "en", "zh"], label="Language", value="en") with gr.Column(): with gr.Accordion("Advanced Settings", open=False): remove_silence_chat = gr.Checkbox(label="Remove Silences", value=True) ref_text_chat = gr.Textbox( label="Reference Text", info="Optional: Leave blank to auto-transcribe", lines=2, ) system_prompt_chat = gr.Textbox( label="System Prompt", value="You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.", lines=2, ) chatbot_interface = gr.Chatbot(label="Conversation") with gr.Row(): with gr.Column(): audio_input_chat = gr.Microphone(label="Speak your message", type="filepath") audio_output_chat = gr.Audio(autoplay=True) with gr.Column(): text_input_chat = gr.Textbox(label="Type your message", lines=1) send_btn_chat = gr.Button("Send Message") clear_btn_chat = gr.Button("Clear Conversation") conversation_state = gr.State( value=[ { "role": "system", "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.", } ] ) @gpu_decorator def process_audio_input(audio_path, text, history, conv_state, language): if not audio_path and not text.strip(): return history, conv_state, "" if audio_path: text = preprocess_ref_audio_text(audio_path, text)[1] if not text.strip(): return history, conv_state, "" conv_state.append({"role": "user", "content": text}) history.append((text, None)) response = generate_response(conv_state, chat_model_state, chat_tokenizer_state) conv_state.append({"role": "assistant", "content": response}) history[-1] = (text, response) return history, conv_state, "" @gpu_decorator def generate_audio_response(history, ref_audio, ref_text, remove_silence, language): if not history or not ref_audio: return None last_user_message, last_ai_response = history[-1] if not last_ai_response: return None audio_result, _, ref_text_out = infer( ref_audio, ref_text, last_ai_response, tts_model_choice, remove_silence, cross_fade_duration=0.15, speed=1.0, language=language, show_info=print, ) return audio_result, ref_text_out def clear_conversation(): return [], [ { "role": "system", "content": "You are not an AI assistant, you are whoever the user says you are. You must stay in character. Keep your responses concise since they will be spoken out loud.", } ] def update_system_prompt(new_prompt): new_conv_state = [{"role": "system", "content": new_prompt}] return [], new_conv_state audio_input_chat.stop_recording( process_audio_input, inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state, language_chat], outputs=[chatbot_interface, conversation_state], ).then( generate_audio_response, inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat, language_chat], outputs=[audio_output_chat, ref_text_chat], ).then( lambda: None, None, audio_input_chat, ) text_input_chat.submit( process_audio_input, inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state, language_chat], outputs=[chatbot_interface, conversation_state], ).then( generate_audio_response, inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat, language_chat], outputs=[audio_output_chat, ref_text_chat], ).then( lambda: None, None, text_input_chat, ) send_btn_chat.click( process_audio_input, inputs=[audio_input_chat, text_input_chat, chatbot_interface, conversation_state, language_chat], outputs=[chatbot_interface, conversation_state], ).then( generate_audio_response, inputs=[chatbot_interface, ref_audio_chat, ref_text_chat, remove_silence_chat, language_chat], outputs=[audio_output_chat, ref_text_chat], ).then( lambda: None, None, text_input_chat, ) clear_btn_chat.click( clear_conversation, outputs=[chatbot_interface, conversation_state], ) system_prompt_chat.change( update_system_prompt, inputs=system_prompt_chat, outputs=[chatbot_interface, conversation_state], ) with gr.Blocks() as app: gr.Markdown( f""" # E2/F5 TTS with Russian Support This is {"a local web UI for [F5 TTS](https://github.com/SWivid/F5-TTS)" if not USING_SPACES else "an online demo for [F5-TTS](https://github.com/SWivid/F5-TTS)"} with advanced batch processing support. This app supports the following TTS models: * [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching) for English and Chinese * [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS) for English and Chinese * [F5-TTS_RUSSIAN](https://huggingface.co/Misha24-10/F5-TTS_RUSSIAN) by [Misha24-10](https://huggingface.co/Misha24-10) for Russian The checkpoints support English, Chinese, and Russian (via F5-TTS_RUSSIAN, licensed under CC-BY-NC-SA-4.0). If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 12s with ✂ in the bottom right corner (otherwise might have non-optimal auto-trimmed result). **NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<12s). Ensure the audio is fully uploaded before generating.** """ ) last_used_custom = files("f5_tts").joinpath("infer/.cache/last_used_custom_model_info_v1.txt") def load_last_used_custom(): try: custom = [] with open(last_used_custom, "r", encoding="utf-8") as f: for line in f: custom.append(line.strip()) return custom except FileNotFoundError: last_used_custom.parent.mkdir(parents=True, exist_ok=True) return DEFAULT_TTS_MODEL_CFG def switch_tts_model(new_choice): global tts_model_choice if new_choice == "Custom": custom_ckpt_path, custom_vocab_path, custom_model_cfg = load_last_used_custom() tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path, json.loads(custom_model_cfg)] return ( gr.update(visible=True, value=custom_ckpt_path), gr.update(visible=True, value=custom_vocab_path), gr.update(visible=True, value=custom_model_cfg), ) else: tts_model_choice = new_choice return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False) def set_custom_model(custom_ckpt_path, custom_vocab_path, custom_model_cfg): global tts_model_choice tts_model_choice = ["Custom", custom_ckpt_path, custom_vocab_path, json.loads(custom_model_cfg)] with open(last_used_custom, "w", encoding="utf-8") as f: f.write(custom_ckpt_path + "\n" + custom_vocab_path + "\n" + custom_model_cfg + "\n") with gr.Row(): if not USING_SPACES: choose_tts_model = gr.Radio( choices=[DEFAULT_TTS_MODEL, "E2-TTS", "Custom"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL ) else: choose_tts_model = gr.Radio( choices=[DEFAULT_TTS_MODEL, "E2-TTS"], label="Choose TTS Model", value=DEFAULT_TTS_MODEL ) custom_ckpt_path = gr.Dropdown( choices=[DEFAULT_TTS_MODEL_CFG[0]], value=load_last_used_custom()[0], allow_custom_value=True, label="Model: local_path | hf://user_id/repo_id/model_ckpt", visible=False, ) custom_vocab_path = gr.Dropdown( choices=[DEFAULT_TTS_MODEL_CFG[1]], value=load_last_used_custom()[1], allow_custom_value=True, label="Vocab: local_path | hf://user_id/repo_id/vocab_file", visible=False, ) custom_model_cfg = gr.Dropdown( choices=[ DEFAULT_TTS_MODEL_CFG[2], json.dumps( dict( dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, text_mask_padding=False, conv_layers=4, pe_attn_head=1, ) ), json.dumps( dict( dim=768, depth=18, heads=12, ff_mult=2, text_dim=512, text_mask_padding=False, conv_layers=4, pe_attn_head=1, ) ), ], value=load_last_used_custom()[2], allow_custom_value=True, label="Config: in a dictionary form", visible=False, ) choose_tts_model.change( switch_tts_model, inputs=[choose_tts_model], outputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], show_progress="hidden", ) custom_ckpt_path.change( set_custom_model, inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], show_progress="hidden", ) custom_vocab_path.change( set_custom_model, inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], show_progress="hidden", ) custom_model_cfg.change( set_custom_model, inputs=[custom_ckpt_path, custom_vocab_path, custom_model_cfg], show_progress="hidden", ) gr.TabbedInterface( [app_tts, app_multistyle, app_chat, app_credits], ["Basic-TTS", "Multi-Speech", "Voice-Chat", "Credits"], ) @click.command() @click.option("--port", "-p", default=None, type=int, help="Port to run the app on") @click.option("--host", "-H", default=None, help="Host to run the app on") @click.option("--share", "-s", default=False, is_flag=True, help="Share the app via Gradio share link") @click.option("--api", "-a", default=True, is_flag=True, help="Allow API access") @click.option( "--root_path", "-r", default=None, type=str, help='The root path (or "mount point") of the application, if it\'s not served from the root ("/") of the domain.', ) @click.option( "--inbrowser", "-i", is_flag=True, default=False, help="Automatically launch the interface in the default web browser", ) def main(port, host, share, api, root_path, inbrowser): global app print("Starting app...") app.queue(api_open=api).launch( server_name=host, server_port=port, share=share, show_api=api, root_path=root_path, inbrowser=inbrowser, ) if __name__ == "__main__": if not USING_SPACES: main() else: app.queue().launch()