simler committed on
Commit
9ee61c3
·
verified ·
1 Parent(s): 5218a1b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -17
app.py CHANGED
@@ -1,34 +1,50 @@
1
  import os
2
  import sys
3
 
4
- # --- 1. 强行阉割 CUDA (最优先执行) ---
 
 
 
 
5
  os.environ["CUDA_VISIBLE_DEVICES"] = ""
 
 
 
 
 
 
6
  import torch
 
 
7
  torch.cuda.is_available = lambda: False
8
  torch.cuda.device_count = lambda: 0
9
  def no_op(self, *args, **kwargs): return self
10
  torch.Tensor.cuda = no_op
11
  torch.nn.Module.cuda = no_op
12
 
13
- print("💉 CUDA 补丁已注入")
14
 
15
- # --- 2. 导入推理函数 ---
 
 
16
  sys.path.append(os.getcwd())
 
 
17
  try:
18
  import inference_webui as core
19
  print("✅ 成功导入 inference_webui")
20
  except ImportError:
21
- print("❌ 严重错误:找不到 inference_webui.py")
22
  sys.exit(1)
23
 
24
- # 自动寻找可用的推理函数
25
  inference_func = None
26
  if hasattr(core, "get_tts_model"):
27
  inference_func = core.get_tts_model
28
  elif hasattr(core, "get_tts_wav"):
29
  inference_func = core.get_tts_wav
30
 
31
- # --- 3. 自动寻找模型 ---
32
  def find_real_model(pattern, search_path="."):
33
  candidates = []
34
  for root, dirs, files in os.walk(search_path):
@@ -50,25 +66,29 @@ if not gpt_path: gpt_path = find_real_model("s1bert")
50
  sovits_path = find_real_model("s2Gv2ProPlus.pth")
51
  if not sovits_path: sovits_path = find_real_model("s2G")
52
 
53
- # --- 4. 加载模型 ---
54
  try:
55
  if gpt_path and sovits_path:
 
 
 
 
56
  if hasattr(core, "change_gpt_weights"):
57
  core.change_gpt_weights(gpt_path=gpt_path)
58
  if hasattr(core, "change_sovits_weights"):
59
  core.change_sovits_weights(sovits_path=sovits_path)
60
- print("🎉 模型加载完成!")
61
  except Exception as e:
62
  print(f"⚠️ 模型加载报错: {e}")
63
 
64
- # --- 5. 推理逻辑 ---
65
  import soundfile as sf
66
  import gradio as gr
 
67
 
68
  REF_AUDIO = "ref.wav"
69
  REF_TEXT = "你好"
70
- # 关键修改:必须用中文名称,不能用 zh
71
- REF_LANG = "中文"
72
 
73
  def run_predict(text):
74
  if not os.path.exists(REF_AUDIO):
@@ -76,9 +96,7 @@ def run_predict(text):
76
 
77
  print(f"📥 任务: {text}")
78
  try:
79
- # 这里的参数名根据不同版本可能略有不同
80
- # 我们按照最常见的旧版逻辑传递
81
- # 注意:text_language 也改成了 "中文"
82
  generator = inference_func(
83
  ref_wav_path=REF_AUDIO,
84
  prompt_text=REF_TEXT,
@@ -94,6 +112,7 @@ def run_predict(text):
94
  sr, data = result_list[0]
95
  out_path = f"out_{os.urandom(4).hex()}.wav"
96
  sf.write(out_path, data, sr)
 
97
  return out_path, "✅ 成功"
98
 
99
  except Exception as e:
@@ -101,12 +120,12 @@ def run_predict(text):
101
  traceback.print_exc()
102
  return None, f"💥 报错: {e}"
103
 
104
- # --- 6. 界面 ---
105
  with gr.Blocks() as app:
106
- gr.Markdown(f"### GPT-SoVITS V2 (CPU Worker)")
107
 
108
  with gr.Row():
109
- inp = gr.Textbox(label="文本", value="终于成功了,这真是不容易啊。")
110
  btn = gr.Button("生成")
111
 
112
  with gr.Row():
 
1
import os
import sys

# ==========================================
# Environment surgery — must run before anything imports torch.
# ==========================================

# 1. Hide every GPU so torch initialises in CPU-only mode.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

# 2. Pretend flash_attn is not installed: a None entry in sys.modules
#    makes `import flash_attn` raise ImportError, so downstream code
#    falls back to the plain CPU attention path.
sys.modules["flash_attn"] = None

import torch

# 3. Make torch itself report "no CUDA" and turn .cuda() into a no-op.
torch.cuda.is_available = lambda: False
torch.cuda.device_count = lambda: 0


def no_op(self, *args, **kwargs):
    """Identity stand-in for .cuda(): return the receiver unchanged."""
    return self


for _patched in (torch.Tensor, torch.nn.Module):
    _patched.cuda = no_op

print("💉 环境手术完成: CUDA已移除, FlashAttn已禁用。")
26
 
27
# ==========================================
# Business logic
# ==========================================
sys.path.append(os.getcwd())

# Pull in the GPT-SoVITS inference core; without it nothing can run.
try:
    import inference_webui as core
except ImportError:
    print("❌ 找不到 inference_webui.py")
    sys.exit(1)
else:
    print("✅ 成功导入 inference_webui")
39
 
40
# Pick whichever TTS entry point this version of the core exposes
# (newer builds use get_tts_model, older ones get_tts_wav).
inference_func = None
for _candidate in ("get_tts_model", "get_tts_wav"):
    if hasattr(core, _candidate):
        inference_func = getattr(core, _candidate)
        break
46
 
47
+ # 自动寻找模型
48
  def find_real_model(pattern, search_path="."):
49
  candidates = []
50
  for root, dirs, files in os.walk(search_path):
 
66
  sovits_path = find_real_model("s2Gv2ProPlus.pth")
67
  if not sovits_path: sovits_path = find_real_model("s2G")
68
 
69
# Load model weights; failures are logged but not fatal so the UI can
# still come up and surface the problem to the user.
try:
    if gpt_path and sovits_path:
        # Force full precision: CPU cannot run fp16, and staying in
        # fp32 also keeps the flash-attn code paths from triggering.
        if hasattr(core, "is_half"):
            core.is_half = False
        if hasattr(core, "change_gpt_weights"):
            core.change_gpt_weights(gpt_path=gpt_path)
        if hasattr(core, "change_sovits_weights"):
            core.change_sovits_weights(sovits_path=sovits_path)
        # NOTE(review): success message assumed to belong inside the
        # path check — indentation was lost in the source rendering.
        print("🎉 模型加载完成 (CPU模式)!")
except Exception as e:
    print(f"⚠️ 模型加载报错: {e}")
83
 
84
# Runtime dependencies for synthesis and the web UI.
import soundfile as sf
import gradio as gr
import numpy as np

# Fixed reference prompt used for voice cloning.
REF_AUDIO = "ref.wav"
REF_TEXT = "你好"
REF_LANG = "中文"  # the webui expects the Chinese display name, not "zh"
 
92
 
93
  def run_predict(text):
94
  if not os.path.exists(REF_AUDIO):
 
96
 
97
  print(f"📥 任务: {text}")
98
  try:
99
+ # 核心推理
 
 
100
  generator = inference_func(
101
  ref_wav_path=REF_AUDIO,
102
  prompt_text=REF_TEXT,
 
112
  sr, data = result_list[0]
113
  out_path = f"out_{os.urandom(4).hex()}.wav"
114
  sf.write(out_path, data, sr)
115
+ print(f"✅ 生成完毕: {out_path}")
116
  return out_path, "✅ 成功"
117
 
118
  except Exception as e:
 
120
  traceback.print_exc()
121
  return None, f"💥 报错: {e}"
122
 
123
+ # 界面
124
  with gr.Blocks() as app:
125
+ gr.Markdown(f"### GPT-SoVITS V2 (CPU)")
126
 
127
  with gr.Row():
128
+ inp = gr.Textbox(label="文本", value="终于成功了,这次一定能响。")
129
  btn = gr.Button("生成")
130
 
131
  with gr.Row():