Spaces:
Running
Running
CryptoCreeper committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -11,46 +11,44 @@ import os
|
|
| 11 |
|
| 12 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 13 |
|
| 14 |
-
# --- Chat ---
|
| 15 |
chat_models = {
|
| 16 |
"Normal": "Qwen/Qwen3-0.6B",
|
| 17 |
"Thinking": "Qwen/Qwen2.5-1.5B-Instruct"
|
| 18 |
}
|
| 19 |
loaded_chat_models = {}
|
| 20 |
loaded_chat_tokenizers = {}
|
| 21 |
-
chat_status_label = None
|
| 22 |
chat_model_loaded = {}
|
| 23 |
|
| 24 |
def load_chat_model(mode):
|
| 25 |
-
global chat_status_label
|
| 26 |
model_id = chat_models[mode]
|
| 27 |
if model_id not in loaded_chat_models:
|
| 28 |
-
|
| 29 |
-
chat_status_label.update("π‘ Model Loading...")
|
| 30 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 31 |
model = AutoModelForCausalLM.from_pretrained(
|
| 32 |
model_id,
|
| 33 |
-
torch_dtype="
|
| 34 |
device_map="auto"
|
| 35 |
)
|
| 36 |
loaded_chat_models[model_id] = model
|
| 37 |
loaded_chat_tokenizers[model_id] = tokenizer
|
| 38 |
chat_model_loaded[model_id] = True
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
return "β
Chat model loaded"
|
| 42 |
|
| 43 |
def chat_logic(user_input, mode):
|
| 44 |
model_id = chat_models[mode]
|
| 45 |
if model_id not in chat_model_loaded:
|
| 46 |
-
return "β Model Not Loaded"
|
|
|
|
| 47 |
model, tokenizer = loaded_chat_models[model_id], loaded_chat_tokenizers[model_id]
|
| 48 |
messages = [{"role": "user", "content": user_input}]
|
| 49 |
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 50 |
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
|
|
|
| 51 |
generated_ids = model.generate(**model_inputs, max_new_tokens=1024)
|
| 52 |
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
|
| 53 |
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
|
|
|
| 54 |
cleaned_response = re.sub(r'<think>.*?</think>\s*\n?', '', response, flags=re.DOTALL)
|
| 55 |
return cleaned_response.strip()
|
| 56 |
|
|
@@ -62,21 +60,16 @@ def clear_chat_model(password):
|
|
| 62 |
del loaded_chat_tokenizers[model_id]
|
| 63 |
chat_model_loaded.pop(model_id, None)
|
| 64 |
torch.cuda.empty_cache()
|
| 65 |
-
|
| 66 |
-
chat_status_label.update("π΄ Model Not Loaded")
|
| 67 |
-
return "β
Chat model cleared from RAM"
|
| 68 |
|
| 69 |
-
# --- Image ---
|
| 70 |
image_model_id = "stabilityai/sdxl-turbo"
|
| 71 |
image_pipe = None
|
| 72 |
-
image_status_label = None
|
| 73 |
image_model_loaded = False
|
| 74 |
|
| 75 |
def load_image_model():
|
| 76 |
global image_pipe, image_model_loaded
|
| 77 |
if image_pipe is None:
|
| 78 |
-
|
| 79 |
-
image_status_label.update("π‘ Model Loading...")
|
| 80 |
pipe = DiffusionPipeline.from_pretrained(
|
| 81 |
image_model_id,
|
| 82 |
torch_dtype=torch.float16 if device == "cuda" else torch.float32
|
|
@@ -84,25 +77,27 @@ def load_image_model():
|
|
| 84 |
pipe.to(device)
|
| 85 |
image_pipe = pipe
|
| 86 |
image_model_loaded = True
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
return "β
Image model loaded"
|
| 90 |
|
| 91 |
def image_logic(prompt, width, height, steps):
|
| 92 |
if not image_model_loaded or image_pipe is None:
|
| 93 |
-
|
|
|
|
|
|
|
| 94 |
start_time = time.time()
|
| 95 |
final_prompt = f"{prompt}, centered and realistic (if applicable)"
|
| 96 |
yield "π₯ IGNITING... (Image generator AI)...", None
|
|
|
|
| 97 |
image = image_pipe(
|
| 98 |
prompt=final_prompt,
|
| 99 |
width=int(width),
|
| 100 |
height=int(height),
|
| 101 |
num_inference_steps=int(steps),
|
| 102 |
-
guidance_scale=
|
| 103 |
-
lcm_origin_steps=50,
|
| 104 |
output_type="pil"
|
| 105 |
).images[0]
|
|
|
|
| 106 |
duration = round(time.time() - start_time, 2)
|
| 107 |
yield f"π₯ EXPLODED in {duration}s", image
|
| 108 |
|
|
@@ -115,31 +110,25 @@ def clear_image_model(password):
|
|
| 115 |
image_pipe = None
|
| 116 |
image_model_loaded = False
|
| 117 |
torch.cuda.empty_cache()
|
| 118 |
-
|
| 119 |
-
image_status_label.update("π΄ Model Not Loaded")
|
| 120 |
-
return "β
Image model cleared from RAM"
|
| 121 |
|
| 122 |
-
# --- TTS ---
|
| 123 |
tts_model_id = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"
|
| 124 |
SUPPORTED_VOICES = ['aiden', 'dylan', 'eric', 'ono_anna', 'ryan', 'serena', 'sohee', 'uncle_fu', 'vivian']
|
| 125 |
tts_model = None
|
| 126 |
-
tts_status_label = None
|
| 127 |
tts_model_loaded = False
|
| 128 |
|
| 129 |
def load_tts_model():
|
| 130 |
global tts_model, tts_model_loaded
|
| 131 |
if tts_model is None:
|
| 132 |
-
|
| 133 |
-
tts_status_label.update("π‘ Model Loading...")
|
| 134 |
tts_model = Qwen3TTSModel.from_pretrained(
|
| 135 |
tts_model_id,
|
| 136 |
device_map=device,
|
| 137 |
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
|
| 138 |
)
|
| 139 |
tts_model_loaded = True
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
return "β
TTS model loaded"
|
| 143 |
|
| 144 |
def tts_logic(text, voice, instructions, auto_detect):
|
| 145 |
if not tts_model_loaded or tts_model is None:
|
|
@@ -157,6 +146,7 @@ def tts_logic(text, voice, instructions, auto_detect):
|
|
| 157 |
detected_lang = lang_map.get(raw_lang, "English")
|
| 158 |
except:
|
| 159 |
pass
|
|
|
|
| 160 |
wavs, sr = tts_model.generate_custom_voice(
|
| 161 |
language=detected_lang,
|
| 162 |
speaker=voice,
|
|
@@ -178,11 +168,8 @@ def clear_tts_model(password):
|
|
| 178 |
tts_model = None
|
| 179 |
tts_model_loaded = False
|
| 180 |
torch.cuda.empty_cache()
|
| 181 |
-
|
| 182 |
-
tts_status_label.update("π΄ Model Not Loaded")
|
| 183 |
-
return "β
TTS model cleared from RAM"
|
| 184 |
|
| 185 |
-
# --- UI ---
|
| 186 |
creeper_css = """
|
| 187 |
body { background-color: #000000; }
|
| 188 |
.gradio-container { background-color: #1e1e1e; border: 10px solid #2e8b57 !important; font-family: 'Courier New', Courier, monospace; color: #00ff00; }
|
|
@@ -199,23 +186,23 @@ with gr.Blocks(css=creeper_css, title="CREEPER AI HUB") as demo:
|
|
| 199 |
gr.Markdown("# π© CREEPER AI HUB π©")
|
| 200 |
|
| 201 |
with gr.Tabs():
|
| 202 |
-
# --- Chat ---
|
| 203 |
with gr.TabItem("SSSSS-CHAT"):
|
| 204 |
gr.Markdown("### Qwen Chat System")
|
| 205 |
chat_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
| 206 |
with gr.Row():
|
| 207 |
mode_radio = gr.Radio(choices=["Normal", "Thinking"], value="Normal", label="Select Brain Mode")
|
| 208 |
load_chat_btn = gr.Button("Load Chat Model")
|
| 209 |
-
|
|
|
|
| 210 |
with gr.Column():
|
| 211 |
chat_input = gr.Textbox(lines=4, placeholder="Ssssss... Talk to the Creeper...", label="Message")
|
| 212 |
chat_output = gr.Textbox(label="Creeper Says")
|
| 213 |
chat_btn = gr.Button("EXPLODE TEXT", variant="primary")
|
|
|
|
| 214 |
load_chat_btn.click(fn=load_chat_model, inputs=mode_radio, outputs=chat_status_label)
|
| 215 |
chat_btn.click(fn=chat_logic, inputs=[chat_input, mode_radio], outputs=chat_output)
|
| 216 |
-
clear_chat_btn.click(fn=clear_chat_model, inputs=
|
| 217 |
|
| 218 |
-
# --- Image ---
|
| 219 |
with gr.TabItem("TNT-IMAGE"):
|
| 220 |
gr.Markdown("### Image Generator System")
|
| 221 |
image_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
|
@@ -223,38 +210,41 @@ with gr.Blocks(css=creeper_css, title="CREEPER AI HUB") as demo:
|
|
| 223 |
with gr.Column(scale=1):
|
| 224 |
img_prompt = gr.Textbox(label="Visual Idea", placeholder="A pixelated forest...", lines=3)
|
| 225 |
with gr.Row():
|
| 226 |
-
w_slider = gr.Slider(256,
|
| 227 |
-
h_slider = gr.Slider(256,
|
| 228 |
-
s_slider = gr.Slider(
|
| 229 |
load_image_btn = gr.Button("Load Image Model")
|
| 230 |
img_btn = gr.Button("EXPLODE IMAGE", variant="primary")
|
| 231 |
-
|
|
|
|
| 232 |
with gr.Column(scale=1):
|
| 233 |
img_output = gr.Image(label="Rendered Loot")
|
|
|
|
| 234 |
load_image_btn.click(fn=load_image_model, inputs=[], outputs=image_status_label)
|
| 235 |
img_btn.click(fn=image_logic, inputs=[img_prompt, w_slider, h_slider, s_slider], outputs=[image_status_label, img_output])
|
| 236 |
-
clear_image_btn.click(fn=clear_image_model, inputs=
|
| 237 |
|
| 238 |
-
# --- TTS ---
|
| 239 |
with gr.TabItem("NOTE-BLOCK (TTS)"):
|
| 240 |
gr.Markdown("### Smart Audio Studio")
|
| 241 |
tts_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
| 242 |
with gr.Row():
|
| 243 |
with gr.Column():
|
| 244 |
-
tts_input = gr.Textbox(label="Text to Speak", placeholder="Enter text
|
| 245 |
with gr.Row():
|
| 246 |
voice_select = gr.Dropdown(choices=SUPPORTED_VOICES, value="vivian", label="Select Speaker")
|
| 247 |
auto_lang = gr.Checkbox(label="Auto-detect Language", value=True)
|
| 248 |
style_instruct = gr.Textbox(label="Style Instruction", value="Speak naturally")
|
| 249 |
load_tts_btn = gr.Button("Load TTS Model")
|
| 250 |
tts_btn = gr.Button("EXPLODE AUDIO", variant="primary")
|
| 251 |
-
|
|
|
|
| 252 |
with gr.Column():
|
| 253 |
audio_output = gr.Audio(label="Audio Output", type="filepath")
|
| 254 |
status_info = gr.Label(label="Block Metadata")
|
|
|
|
| 255 |
load_tts_btn.click(fn=load_tts_model, inputs=[], outputs=tts_status_label)
|
| 256 |
tts_btn.click(fn=tts_logic, inputs=[tts_input, voice_select, style_instruct, auto_lang], outputs=[audio_output, status_info])
|
| 257 |
-
clear_tts_btn.click(fn=clear_tts_model, inputs=
|
| 258 |
|
| 259 |
if __name__ == "__main__":
|
| 260 |
-
demo.launch()
|
|
|
|
| 11 |
|
| 12 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 13 |
|
|
|
|
| 14 |
chat_models = {
|
| 15 |
"Normal": "Qwen/Qwen3-0.6B",
|
| 16 |
"Thinking": "Qwen/Qwen2.5-1.5B-Instruct"
|
| 17 |
}
|
| 18 |
loaded_chat_models = {}
|
| 19 |
loaded_chat_tokenizers = {}
|
|
|
|
| 20 |
chat_model_loaded = {}
|
| 21 |
|
| 22 |
def load_chat_model(mode):
|
|
|
|
| 23 |
model_id = chat_models[mode]
|
| 24 |
if model_id not in loaded_chat_models:
|
| 25 |
+
gr.Info(f"π‘ Loading {mode} Brain...")
|
|
|
|
| 26 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 27 |
model = AutoModelForCausalLM.from_pretrained(
|
| 28 |
model_id,
|
| 29 |
+
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
|
| 30 |
device_map="auto"
|
| 31 |
)
|
| 32 |
loaded_chat_models[model_id] = model
|
| 33 |
loaded_chat_tokenizers[model_id] = tokenizer
|
| 34 |
chat_model_loaded[model_id] = True
|
| 35 |
+
return "π’ Model Loaded"
|
| 36 |
+
return "π’ Model Already Loaded"
|
|
|
|
| 37 |
|
| 38 |
def chat_logic(user_input, mode):
|
| 39 |
model_id = chat_models[mode]
|
| 40 |
if model_id not in chat_model_loaded:
|
| 41 |
+
return "β Model Not Loaded. Click 'Load Chat Model' first!"
|
| 42 |
+
|
| 43 |
model, tokenizer = loaded_chat_models[model_id], loaded_chat_tokenizers[model_id]
|
| 44 |
messages = [{"role": "user", "content": user_input}]
|
| 45 |
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 46 |
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
| 47 |
+
|
| 48 |
generated_ids = model.generate(**model_inputs, max_new_tokens=1024)
|
| 49 |
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
|
| 50 |
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 51 |
+
|
| 52 |
cleaned_response = re.sub(r'<think>.*?</think>\s*\n?', '', response, flags=re.DOTALL)
|
| 53 |
return cleaned_response.strip()
|
| 54 |
|
|
|
|
| 60 |
del loaded_chat_tokenizers[model_id]
|
| 61 |
chat_model_loaded.pop(model_id, None)
|
| 62 |
torch.cuda.empty_cache()
|
| 63 |
+
return "π΄ Model Not Loaded"
|
|
|
|
|
|
|
| 64 |
|
|
|
|
| 65 |
image_model_id = "stabilityai/sdxl-turbo"
|
| 66 |
image_pipe = None
|
|
|
|
| 67 |
image_model_loaded = False
|
| 68 |
|
| 69 |
def load_image_model():
|
| 70 |
global image_pipe, image_model_loaded
|
| 71 |
if image_pipe is None:
|
| 72 |
+
gr.Info("π‘ Priming TNT (Loading Image Model)...")
|
|
|
|
| 73 |
pipe = DiffusionPipeline.from_pretrained(
|
| 74 |
image_model_id,
|
| 75 |
torch_dtype=torch.float16 if device == "cuda" else torch.float32
|
|
|
|
| 77 |
pipe.to(device)
|
| 78 |
image_pipe = pipe
|
| 79 |
image_model_loaded = True
|
| 80 |
+
return "π’ Model Loaded"
|
| 81 |
+
return "π’ Model Already Loaded"
|
|
|
|
| 82 |
|
| 83 |
def image_logic(prompt, width, height, steps):
|
| 84 |
if not image_model_loaded or image_pipe is None:
|
| 85 |
+
yield "β Model Not Loaded", None
|
| 86 |
+
return
|
| 87 |
+
|
| 88 |
start_time = time.time()
|
| 89 |
final_prompt = f"{prompt}, centered and realistic (if applicable)"
|
| 90 |
yield "π₯ IGNITING... (Image generator AI)...", None
|
| 91 |
+
|
| 92 |
image = image_pipe(
|
| 93 |
prompt=final_prompt,
|
| 94 |
width=int(width),
|
| 95 |
height=int(height),
|
| 96 |
num_inference_steps=int(steps),
|
| 97 |
+
guidance_scale=0.0,
|
|
|
|
| 98 |
output_type="pil"
|
| 99 |
).images[0]
|
| 100 |
+
|
| 101 |
duration = round(time.time() - start_time, 2)
|
| 102 |
yield f"π₯ EXPLODED in {duration}s", image
|
| 103 |
|
|
|
|
| 110 |
image_pipe = None
|
| 111 |
image_model_loaded = False
|
| 112 |
torch.cuda.empty_cache()
|
| 113 |
+
return "π΄ Model Not Loaded"
|
|
|
|
|
|
|
| 114 |
|
|
|
|
| 115 |
tts_model_id = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"
|
| 116 |
SUPPORTED_VOICES = ['aiden', 'dylan', 'eric', 'ono_anna', 'ryan', 'serena', 'sohee', 'uncle_fu', 'vivian']
|
| 117 |
tts_model = None
|
|
|
|
| 118 |
tts_model_loaded = False
|
| 119 |
|
| 120 |
def load_tts_model():
|
| 121 |
global tts_model, tts_model_loaded
|
| 122 |
if tts_model is None:
|
| 123 |
+
gr.Info("π‘ Tuning Note-Blocks (Loading TTS)...")
|
|
|
|
| 124 |
tts_model = Qwen3TTSModel.from_pretrained(
|
| 125 |
tts_model_id,
|
| 126 |
device_map=device,
|
| 127 |
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
|
| 128 |
)
|
| 129 |
tts_model_loaded = True
|
| 130 |
+
return "π’ Model Loaded"
|
| 131 |
+
return "π’ Model Already Loaded"
|
|
|
|
| 132 |
|
| 133 |
def tts_logic(text, voice, instructions, auto_detect):
|
| 134 |
if not tts_model_loaded or tts_model is None:
|
|
|
|
| 146 |
detected_lang = lang_map.get(raw_lang, "English")
|
| 147 |
except:
|
| 148 |
pass
|
| 149 |
+
|
| 150 |
wavs, sr = tts_model.generate_custom_voice(
|
| 151 |
language=detected_lang,
|
| 152 |
speaker=voice,
|
|
|
|
| 168 |
tts_model = None
|
| 169 |
tts_model_loaded = False
|
| 170 |
torch.cuda.empty_cache()
|
| 171 |
+
return "π΄ Model Not Loaded"
|
|
|
|
|
|
|
| 172 |
|
|
|
|
| 173 |
creeper_css = """
|
| 174 |
body { background-color: #000000; }
|
| 175 |
.gradio-container { background-color: #1e1e1e; border: 10px solid #2e8b57 !important; font-family: 'Courier New', Courier, monospace; color: #00ff00; }
|
|
|
|
| 186 |
gr.Markdown("# π© CREEPER AI HUB π©")
|
| 187 |
|
| 188 |
with gr.Tabs():
|
|
|
|
| 189 |
with gr.TabItem("SSSSS-CHAT"):
|
| 190 |
gr.Markdown("### Qwen Chat System")
|
| 191 |
chat_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
| 192 |
with gr.Row():
|
| 193 |
mode_radio = gr.Radio(choices=["Normal", "Thinking"], value="Normal", label="Select Brain Mode")
|
| 194 |
load_chat_btn = gr.Button("Load Chat Model")
|
| 195 |
+
chat_pw = gr.Textbox(label="Password", type="password")
|
| 196 |
+
clear_chat_btn = gr.Button("Clear Model")
|
| 197 |
with gr.Column():
|
| 198 |
chat_input = gr.Textbox(lines=4, placeholder="Ssssss... Talk to the Creeper...", label="Message")
|
| 199 |
chat_output = gr.Textbox(label="Creeper Says")
|
| 200 |
chat_btn = gr.Button("EXPLODE TEXT", variant="primary")
|
| 201 |
+
|
| 202 |
load_chat_btn.click(fn=load_chat_model, inputs=mode_radio, outputs=chat_status_label)
|
| 203 |
chat_btn.click(fn=chat_logic, inputs=[chat_input, mode_radio], outputs=chat_output)
|
| 204 |
+
clear_chat_btn.click(fn=clear_chat_model, inputs=chat_pw, outputs=chat_status_label)
|
| 205 |
|
|
|
|
| 206 |
with gr.TabItem("TNT-IMAGE"):
|
| 207 |
gr.Markdown("### Image Generator System")
|
| 208 |
image_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
|
|
|
| 210 |
with gr.Column(scale=1):
|
| 211 |
img_prompt = gr.Textbox(label="Visual Idea", placeholder="A pixelated forest...", lines=3)
|
| 212 |
with gr.Row():
|
| 213 |
+
w_slider = gr.Slider(256, 1024, 512, step=64, label="Block Width")
|
| 214 |
+
h_slider = gr.Slider(256, 1024, 512, step=64, label="Block Height")
|
| 215 |
+
s_slider = gr.Slider(1, 10, 4, step=1, label="Detonation Steps")
|
| 216 |
load_image_btn = gr.Button("Load Image Model")
|
| 217 |
img_btn = gr.Button("EXPLODE IMAGE", variant="primary")
|
| 218 |
+
img_pw = gr.Textbox(label="Password", type="password")
|
| 219 |
+
clear_image_btn = gr.Button("Clear Model")
|
| 220 |
with gr.Column(scale=1):
|
| 221 |
img_output = gr.Image(label="Rendered Loot")
|
| 222 |
+
|
| 223 |
load_image_btn.click(fn=load_image_model, inputs=[], outputs=image_status_label)
|
| 224 |
img_btn.click(fn=image_logic, inputs=[img_prompt, w_slider, h_slider, s_slider], outputs=[image_status_label, img_output])
|
| 225 |
+
clear_image_btn.click(fn=clear_image_model, inputs=img_pw, outputs=image_status_label)
|
| 226 |
|
|
|
|
| 227 |
with gr.TabItem("NOTE-BLOCK (TTS)"):
|
| 228 |
gr.Markdown("### Smart Audio Studio")
|
| 229 |
tts_status_label = gr.Label("π΄ Model Not Loaded", label="Status")
|
| 230 |
with gr.Row():
|
| 231 |
with gr.Column():
|
| 232 |
+
tts_input = gr.Textbox(label="Text to Speak", placeholder="Enter text...", lines=4)
|
| 233 |
with gr.Row():
|
| 234 |
voice_select = gr.Dropdown(choices=SUPPORTED_VOICES, value="vivian", label="Select Speaker")
|
| 235 |
auto_lang = gr.Checkbox(label="Auto-detect Language", value=True)
|
| 236 |
style_instruct = gr.Textbox(label="Style Instruction", value="Speak naturally")
|
| 237 |
load_tts_btn = gr.Button("Load TTS Model")
|
| 238 |
tts_btn = gr.Button("EXPLODE AUDIO", variant="primary")
|
| 239 |
+
tts_pw = gr.Textbox(label="Password", type="password")
|
| 240 |
+
clear_tts_btn = gr.Button("Clear Model")
|
| 241 |
with gr.Column():
|
| 242 |
audio_output = gr.Audio(label="Audio Output", type="filepath")
|
| 243 |
status_info = gr.Label(label="Block Metadata")
|
| 244 |
+
|
| 245 |
load_tts_btn.click(fn=load_tts_model, inputs=[], outputs=tts_status_label)
|
| 246 |
tts_btn.click(fn=tts_logic, inputs=[tts_input, voice_select, style_instruct, auto_lang], outputs=[audio_output, status_info])
|
| 247 |
+
clear_tts_btn.click(fn=clear_tts_model, inputs=tts_pw, outputs=tts_status_label)
|
| 248 |
|
| 249 |
if __name__ == "__main__":
|
| 250 |
+
demo.launch()
|