simler committed on
Commit
62f3cea
·
verified ·
1 Parent(s): 160abcd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -85
app.py CHANGED
@@ -1,119 +1,113 @@
1
  import os
2
  import sys
3
- import logging
4
 
5
- # --- 1. 核弹级补丁:强行阉割 CUDA (防报错核心) ---
6
- # 这一步必须最先执行!在导入 torch 之前!
7
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
8
-
9
  import torch
10
-
11
- # 欺骗 torch,告诉它没有显卡
12
  torch.cuda.is_available = lambda: False
13
  torch.cuda.device_count = lambda: 0
14
-
15
- # 欺骗 tensor,如果代码调用了 .cuda(),我们把它变成“原地不动”
16
- # 这样原来的代码写了 x.cuda() 也不会炸,而是继续在 CPU 上跑
17
- def no_op(self, *args, **kwargs):
18
- return self
19
  torch.Tensor.cuda = no_op
20
  torch.nn.Module.cuda = no_op
21
 
22
- print("💉 CUDA 阉割补丁已注入,所有 GPU 操作已被重定向至 CPU。")
23
 
24
- # --- 2. 导入原版逻辑 ---
25
- now_dir = os.getcwd()
26
- sys.path.append(now_dir)
27
 
 
28
  try:
29
- # 直接从根目录的 inference_webui.py 导入函数
30
- # 这样就能复用它原本的所有逻辑,不用我们自己写路径了
31
- from inference_webui import change_gpt_weights, change_sovits_weights, get_tts_model
32
- print("✅ 成功导入原版推理函数!")
33
- except ImportError as e:
34
- print(f"❌ 导入失败: {e}")
35
- print("请检查 Files 列表里有没有 inference_webui.py")
36
-
37
- import gradio as gr
38
- import soundfile as sf
39
- import numpy as np
40
-
41
- # --- 3. 自动寻找模型文件 ---
42
- def find_file(pattern, search_path="."):
43
- # 优先找 pretrained_models 文件夹
44
- potential_paths = [
45
- os.path.join(search_path, "pretrained_models"),
46
- os.path.join(search_path, "GPT_SoVITS/pretrained_models"),
47
- search_path
48
- ]
49
-
50
- for path in potential_paths:
51
- if os.path.exists(path):
52
- for root, dirs, files in os.walk(path):
53
- for file in files:
54
- if pattern in file:
55
- return os.path.join(root, file)
 
 
56
  return None
57
 
58
- print("🔍 正在寻找模型...")
59
- # 寻找 GPT 模型
60
- gpt_path = find_file("s1v3.ckpt")
61
- if not gpt_path: gpt_path = find_file("s1bert")
62
-
63
- # 寻找 SoVITS 模型
64
- sovits_path = find_file("s2Gv2ProPlus.pth")
65
- if not sovits_path: sovits_path = find_file("s2G")
66
 
67
- print(f"👉 GPT模型: {gpt_path}")
68
- print(f"👉 SoVITS模型: {sovits_path}")
69
 
70
  # --- 4. 加载模型 ---
71
  try:
72
  if gpt_path and sovits_path:
73
- change_gpt_weights(gpt_path=gpt_path)
74
- change_sovits_weights(sovits_path=sovits_path)
 
 
 
75
  print("✅ 模型加载完成!")
76
  else:
77
- print("❌ 没找到模型文件,请检查 Logs 里的下载记录。")
78
  except Exception as e:
79
- print(f"⚠️ 模型加载警告 (可能是内存不够): {e}")
80
 
81
- # --- 5. 推理函数 ---
82
- # 你的参考音频配置
83
- REF_AUDIO_PATH = "ref.wav"
84
- REF_TEXT = "你好" # 建议修改为 ref.wav 实际说的话,不改也行,GPT-SoVITS 容错高
 
 
 
85
  REF_LANG = "zh"
86
 
87
- def predict_worker(text):
88
- if not os.path.exists(REF_AUDIO_PATH):
89
- return None, "❌ ���误:根目录下没找到 ref.wav,请上传!"
90
-
91
- print(f"📥 收到任务: {text[:15]}...")
92
 
 
 
 
 
93
  try:
94
- # 调用原版的 get_tts_model
95
- # 这里的参数完全照搬 inference_webui.py 里的定义
96
- generator = get_tts_model(
97
- ref_wav_path=REF_AUDIO_PATH,
 
 
 
98
  prompt_text=REF_TEXT,
99
  prompt_language=REF_LANG,
100
  text=text,
101
  text_language="zh",
102
  how_to_cut="凑四句一切",
103
- top_k=5,
104
- top_p=1.0,
105
- temperature=1.0,
106
- ref_free=False
107
  )
108
 
 
109
  result_list = list(generator)
110
  if result_list:
111
- sampling_rate, audio_data = result_list[0]
112
- output_path = f"out_{os.urandom(4).hex()}.wav"
113
- sf.write(output_path, audio_data, sampling_rate)
114
- return output_path, "✅ 生成成功"
115
- else:
116
- return None, "❌ 生成结果为空"
117
 
118
  except Exception as e:
119
  import traceback
@@ -121,18 +115,19 @@ def predict_worker(text):
121
  return None, f"💥 报错: {e}"
122
 
123
  # --- 6. 界面 ---
124
- with gr.Blocks(title="GPT-SoVITS CPU Worker") as app:
125
- gr.Markdown(f"### 运行模型: `{os.path.basename(gpt_path) if gpt_path else '未找到'}`")
 
126
 
127
  with gr.Row():
128
- inp = gr.Textbox(label="输入文本", value="测试一下,今天天气真不错。")
129
- btn = gr.Button("生成 (CPU渲染较慢,请耐心)")
130
 
131
  with gr.Row():
132
  out = gr.Audio(label="音频")
133
  log = gr.Textbox(label="日志")
134
-
135
- btn.click(predict_worker, [inp], [out, log], api_name="predict")
136
 
137
  if __name__ == "__main__":
138
  app.queue().launch()
 
1
import os
import sys

# --- 1. Forcibly disable CUDA (must run before torch is imported) ---
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import torch

# Pretend no GPU exists so downstream code takes its CPU paths.
torch.cuda.is_available = lambda: False
torch.cuda.device_count = lambda: 0


def no_op(self, *args, **kwargs):
    """Stand-in for ``.cuda()``: leave the object where it is (on CPU)."""
    return self


# Any stray ``x.cuda()`` call now degrades to a harmless no-op.
torch.Tensor.cuda = no_op
torch.nn.Module.cuda = no_op

print("💉 CUDA 补丁已注入")
14
 
15
# --- 2. Import the inference entry points (flat repo layout) ---
sys.path.append(os.getcwd())

# Import the core inference module; abort if the repo is missing it.
try:
    import inference_webui as core
    print("✅ 成功导入 inference_webui")
except ImportError:
    print("❌ 严重错误:找不到 inference_webui.py")
    sys.exit(1)

# Probe for whichever generation function this inference_webui version exposes.
inference_func = getattr(core, "get_tts_model", None)
if inference_func is not None:
    print("👉 使用新版函数: get_tts_model")
else:
    inference_func = getattr(core, "get_tts_wav", None)
    if inference_func is not None:
        print("👉 使用旧版函数: get_tts_wav")
    else:
        # Nothing usable found — dump the public names to aid debugging.
        print("❌ 没找到推理函数!可用函数如下:")
        print([d for d in dir(core) if "__" not in d])
39
+
40
# --- 3. Model discovery (skips stale .lock files) ---
def find_real_model(pattern, search_path="."):
    """Return the first file under *search_path* whose name contains *pattern*.

    Files ending in ``.lock`` (left behind by interrupted hub downloads) are
    ignored. Returns ``None`` when nothing matches.
    """
    for root, _dirs, names in os.walk(search_path):
        candidates = (n for n in names if pattern in n and not n.endswith(".lock"))
        for name in candidates:
            found = os.path.join(root, name)
            print(f"🔍 发现模型: {found}")
            return found
    return None
50
 
51
# Locate checkpoints anywhere under the working directory, preferring the
# newer checkpoint names and falling back to the generic patterns.
gpt_path = find_real_model("s1v3.ckpt") or find_real_model("s1bert")
sovits_path = find_real_model("s2Gv2ProPlus.pth") or find_real_model("s2G")
56
 
57
# --- 4. Load the weights through the helpers exported by inference_webui ---
try:
    if gpt_path and sovits_path:
        # Call the loaders only if this inference_webui version defines them.
        gpt_loader = getattr(core, "change_gpt_weights", None)
        if gpt_loader is not None:
            gpt_loader(gpt_path=gpt_path)
        sovits_loader = getattr(core, "change_sovits_weights", None)
        if sovits_loader is not None:
            sovits_loader(sovits_path=sovits_path)
        print("✅ 模型加载完成!")
    else:
        print("❌ 依然没找到模型,请检查 Files 里的文件下载情况。")
except Exception as e:
    # Best-effort: keep the app alive even if loading fails (e.g. OOM).
    print(f"⚠️ 模型加载报错 (可能内存不足): {e}")
70
 
71
# --- 5. Inference plumbing ---
import gradio as gr
import numpy as np
import soundfile as sf

# Reference audio that conditions the voice; must sit in the repo root.
REF_AUDIO = "ref.wav"
# Transcript and language of the reference clip.
REF_TEXT = "你好"
REF_LANG = "zh"
79
 
80
def run_predict(text):
    """Synthesize speech for *text* using the discovered inference function.

    Returns a ``(audio_path, log_message)`` pair — Gradio binds these to the
    Audio and Textbox outputs, so BOTH values must always be returned.
    """
    if not os.path.exists(REF_AUDIO):
        return None, "❌ 错误:请上传 ref.wav"

    if not inference_func:
        return None, "❌ 错误:未找到推理函数"

    print(f"📥 任务: {text}")
    try:
        # Parameter list matches the signature shared by get_tts_model /
        # get_tts_wav in inference_webui (keyword args for safety).
        generator = inference_func(
            ref_wav_path=REF_AUDIO,
            prompt_text=REF_TEXT,
            prompt_language=REF_LANG,
            text=text,
            text_language="zh",
            how_to_cut="凑四句一切",
            top_k=5, top_p=1, temperature=1, ref_free=False
        )

        # Drain the generator; the first yielded item is (sample_rate, audio).
        result_list = list(generator)
        if result_list:
            sr, data = result_list[0]
            out_path = f"out_{os.urandom(4).hex()}.wav"
            sf.write(out_path, data, sr)
            return out_path, "✅ 成功"
        # Bug fix: previously fell through and implicitly returned None (a
        # single value), which breaks Gradio's two-output binding.
        return None, "❌ 生成结果为空"

    except Exception as e:
        import traceback
        traceback.print_exc()  # full stack to the logs for debugging
        return None, f"💥 报错: {e}"
116
 
117
# --- 6. UI ---
with gr.Blocks() as app:
    # Fix: dropped the extraneous f-prefix on the placeholder-free string
    # (lint F541); rendered text is unchanged.
    gr.Markdown("### GPT-SoVITS 终极适配版")
    # Show which checkpoints were resolved at startup.
    gr.Markdown(f"GPT: `{gpt_path}` \n SoVITS: `{sovits_path}`")

    with gr.Row():
        inp = gr.Textbox(label="文本", value="测试一下语音合成。")
        btn = gr.Button("生成")

    with gr.Row():
        out = gr.Audio(label="音频")
        log = gr.Textbox(label="日志")

    # api_name exposes this event as a named API endpoint (/predict).
    btn.click(run_predict, [inp], [out, log], api_name="predict")

if __name__ == "__main__":
    # queue() serializes requests — important on a CPU-only Space.
    app.queue().launch()