YatharthS committed on
Commit
f239f3d
·
verified ·
1 Parent(s): c4c31b4

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -0
app.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys
import subprocess

# Bootstrap: fetch the LuxTTS repo and its dependencies at startup so the
# Space is self-contained (no prebuilt image required).

# 1. Clone the repo (skipped if a previous run already left a clone behind).
# check=True makes a failed clone fail loudly here instead of surfacing
# later as a confusing ImportError.
if not os.path.exists("LuxTTS"):
    subprocess.run(
        ["git", "clone", "https://github.com/ysharma3501/LuxTTS.git"],
        check=True,
    )

# 2. Install requirements from the cloned folder.
# sys.executable guarantees pip targets the Space's current environment.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-r", "LuxTTS/requirements.txt"],
    check=True,
)

# 3. Put the cloned folder on the import path so
# `from zipvoice.luxtts import LuxTTS` below can resolve.
sys.path.append(os.path.abspath("LuxTTS"))
16
+
17
import gradio as gr
import torch
from zipvoice.luxtts import LuxTTS

# Run on the GPU when one is present, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Load the pretrained LuxTTS model once at startup; every request reuses it.
lux_tts = LuxTTS('YatharthS/LuxTTS', device=device, threads=2)
24
+
25
def infer(text, audio_prompt, rms, t_shift, num_steps, speed=1.0, return_smooth=False):
    """Synthesize speech for *text* in the voice of *audio_prompt*.

    Bug fix: the UI's btn.click wires only five inputs into this handler,
    but the function previously required seven positional arguments, so
    every click raised a TypeError. `speed` and `return_smooth` now carry
    defaults, keeping the call backward-compatible either way.

    Args:
        text: Text to speak; empty/falsy text short-circuits to None.
        audio_prompt: Filepath of the reference audio (gr.Audio type="filepath"),
            or None when the user provided no recording.
        rms: RMS level forwarded to prompt encoding.
        t_shift: Sampling time-shift parameter forwarded to generation.
        num_steps: Number of sampling steps (coerced to int for the model).
        speed: Speech speed multiplier. Defaults to 1.0 — TODO confirm this
            matches LuxTTS's own default.
        return_smooth: Whether to request the smoothed waveform. Defaults to
            False — TODO confirm against LuxTTS's default.

    Returns:
        (48000, waveform) tuple for gr.Audio, or None when either input is
        missing. NOTE(review): 48000 presumably matches the model's output
        sample rate — confirm against LuxTTS.
    """
    # Guard: nothing to synthesize without both a reference voice and text.
    if audio_prompt is None or not text:
        return None

    encoded_prompt = lux_tts.encode_prompt(audio_prompt, rms=rms)
    final_wav = lux_tts.generate_speech(
        text,
        encoded_prompt,
        num_steps=int(num_steps),
        t_shift=t_shift,
        speed=speed,
        return_smooth=return_smooth,
    )
    return (48000, final_wav)
39
+
40
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# LuxTTS (Automatic Setup)")
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Text", value="Hello there!")
            input_audio = gr.Audio(label="Reference Voice", type="filepath")
            with gr.Row():
                rms_val = gr.Number(value=0.01, label="RMS")
                t_shift_val = gr.Number(value=0.9, label="T-Shift")
                steps_val = gr.Slider(1, 10, value=4, step=1, label="Steps")
            with gr.Row():
                # Bug fix: infer() takes seven inputs but only five were
                # wired into the click handler, so every click raised a
                # TypeError. Expose the two missing ones here.
                speed_val = gr.Slider(0.5, 2.0, value=1.0, step=0.1, label="Speed")
                smooth_val = gr.Checkbox(value=False, label="Smooth output")
            btn = gr.Button("Synthesize")
        with gr.Column():
            audio_out = gr.Audio(label="Output")

    # Wire all seven infer() parameters, in declaration order.
    btn.click(
        infer,
        [input_text, input_audio, rms_val, t_shift_val, steps_val, speed_val, smooth_val],
        audio_out,
    )

demo.launch()