CryptoCreeper committed on
Commit
e9b6534
·
verified ·
1 Parent(s): e62c3bb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import soundfile as sf
4
+ from qwen_tts import Qwen3TTSModel
5
+ from langdetect import detect
6
+ import os
7
+
8
# Model setup: prefer the GPU with bfloat16 weights to halve memory use;
# fall back to full-precision float32 on CPU.
if torch.cuda.is_available():
    device = "cuda"
    weight_dtype = torch.bfloat16
else:
    device = "cpu"
    weight_dtype = torch.float32

model_id = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"

print(f"Loading model to {device}...")
model = Qwen3TTSModel.from_pretrained(
    model_id,
    device_map=device,
    torch_dtype=weight_dtype,
)
18
+
19
def smart_tts(text, voice, instructions, auto_detect):
    """Synthesize speech for *text* with the selected speaker and style.

    Parameters
    ----------
    text : str
        Input text to synthesize.
    voice : str
        Speaker name understood by the CustomVoice model.
    instructions : str
        Free-form style/emotion instruction (e.g. "Speak with a happy tone").
    auto_detect : bool
        When True, infer the text language with langdetect.

    Returns
    -------
    tuple
        (path to generated WAV, status message) on success,
        (None, error message) on failure.
    """
    try:
        # Map ISO 639-1 codes (as returned by langdetect) to the language
        # names the model expects.
        # BUGFIX: langdetect reports Japanese as 'ja', not 'jp' — the old
        # 'jp' key could never match, so Japanese always fell back to English.
        lang_map = {
            'zh': 'Chinese', 'en': 'English', 'ja': 'Japanese',
            'ko': 'Korean', 'de': 'German', 'fr': 'French',
            'ru': 'Russian', 'pt': 'Portuguese', 'es': 'Spanish', 'it': 'Italian'
        }

        detected_lang = "English"  # fallback when detection is off or fails
        if auto_detect:
            try:
                # detect() may return region-tagged codes like 'zh-cn';
                # keep only the primary language subtag.
                raw_lang = detect(text).split('-')[0]
                detected_lang = lang_map.get(raw_lang, "English")
            except Exception:
                # langdetect raises on empty/undecidable input — keep the
                # English fallback instead of failing the whole request.
                # (Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.)
                pass

        # Generate audio. The CustomVoice model uses `instruct`
        # for style/emotion control.
        wavs, sr = model.generate_custom_voice(
            language=detected_lang,
            speaker=voice,
            instruct=instructions,
            text=text
        )

        output_path = "output.wav"
        sf.write(output_path, wavs[0], sr)
        return output_path, f"Detected Language: {detected_lang}"

    except Exception as e:
        # Gradio boundary: surface the error message in the UI
        # instead of crashing the app.
        return None, str(e)
51
+
52
# ---- Gradio UI layout ----
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🗣️ Qwen3-TTS Smart Studio")
    gr.Markdown("Experience natural speech with style control using Qwen3-TTS-12Hz.")

    with gr.Row():
        # Left column: text entry and generation controls.
        with gr.Column():
            text_box = gr.Textbox(
                label="Input Text",
                placeholder="Type something here...",
                lines=4,
            )

            with gr.Row():
                speaker_dd = gr.Dropdown(
                    choices=["Vivian", "Ryan", "Bella", "Daisy", "George"],
                    value="Vivian",
                    label="Speaker",
                )
                detect_cb = gr.Checkbox(label="Auto-detect Language", value=True)

            instruct_box = gr.Textbox(
                label="Style Instruction (e.g., 'Speak with a happy tone')",
                placeholder="Angry, Sad, Excited, Whisper...",
                value="Speak naturally",
            )

            synth_btn = gr.Button("Generate Speech", variant="primary")

        # Right column: playback and status readout.
        with gr.Column():
            audio_out = gr.Audio(label="Generated Audio", type="filepath")
            status_lbl = gr.Label(label="System Status")

    # Wire the button to the TTS callback; argument order must match
    # smart_tts(text, voice, instructions, auto_detect).
    synth_btn.click(
        fn=smart_tts,
        inputs=[text_box, speaker_dd, instruct_box, detect_cb],
        outputs=[audio_out, status_lbl],
    )

if __name__ == "__main__":
    demo.launch()