simler committed on
Commit
46a4752
·
verified ·
1 Parent(s): cbc36f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -27
app.py CHANGED
@@ -1,9 +1,23 @@
1
  import os
2
  import sys
3
- import logging
4
 
5
- # --- 1. 基础环境设置 ---
6
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  import torch
8
  torch.cuda.is_available = lambda: False
9
  torch.cuda.device_count = lambda: 0
@@ -11,29 +25,29 @@ def no_op(self, *args, **kwargs): return self
11
  torch.Tensor.cuda = no_op
12
  torch.nn.Module.cuda = no_op
13
 
14
- print("💉 CUDA 补丁已注入")
15
 
16
- # --- 2. 导入核心逻辑 ---
 
 
17
  sys.path.append(os.getcwd())
18
 
19
  try:
20
  import inference_webui as core
21
  print("✅ 成功导入 inference_webui")
22
 
23
- # 🛑 关键修正:强制关闭半精度 (CPU 不支持 FP16,也能顺便避开 Flash Attention)
24
  if hasattr(core, "is_half"):
25
  core.is_half = False
26
- print("✅ 强制禁用半精度 (is_half = False)")
27
-
28
- if hasattr(core, "device"):
29
- core.device = "cpu"
30
- print("✅ 强制指定设备 (device = cpu)")
31
 
32
  except ImportError:
33
- print("❌ 严重错误:找不到 inference_webui.py")
34
  sys.exit(1)
35
 
36
- # --- 3. 自动寻找模型 ---
 
 
37
  def find_real_model(pattern, search_path="."):
38
  candidates = []
39
  for root, dirs, files in os.walk(search_path):
@@ -51,31 +65,34 @@ def find_real_model(pattern, search_path="."):
51
  gpt_path = find_real_model("s1v3.ckpt") or find_real_model("s1bert")
52
  sovits_path = find_real_model("s2Gv2ProPlus.pth") or find_real_model("s2G")
53
 
54
- # --- 4. 加载模型 ---
 
 
55
  try:
56
  if gpt_path and sovits_path:
57
- # 再次确保加载模型时不会启用半精度
58
  core.is_half = False
59
 
60
  if hasattr(core, "change_gpt_weights"):
61
  core.change_gpt_weights(gpt_path=gpt_path)
62
  if hasattr(core, "change_sovits_weights"):
63
  core.change_sovits_weights(sovits_path=sovits_path)
64
- print(f"🎉 模型加载成功!")
65
  else:
66
  print("❌ 未找到模型文件")
67
  except Exception as e:
68
  print(f"⚠️ 模型加载报错: {e}")
69
 
70
- # --- 5. 推理逻辑 ---
 
 
71
  import soundfile as sf
72
  import gradio as gr
73
  import numpy as np
74
 
75
  REF_AUDIO = "ref.wav"
76
  REF_TEXT = "你好"
77
- # 🛑 关键修正:语言必须是中文
78
- REF_LANG = "中文"
79
 
80
  def run_predict(text):
81
  if not os.path.exists(REF_AUDIO):
@@ -83,18 +100,16 @@ def run_predict(text):
83
 
84
  print(f"📥 任务: {text}")
85
  try:
86
- # 自动识别函数
87
  inference_func = getattr(core, "get_tts_model", getattr(core, "get_tts_wav", None))
88
- if not inference_func:
89
- return None, "❌ 找不到推理函数"
90
 
91
  # 核心调用
92
  generator = inference_func(
93
  ref_wav_path=REF_AUDIO,
94
  prompt_text=REF_TEXT,
95
- prompt_language=REF_LANG, # 中文
96
  text=text,
97
- text_language="中文", # 中文
98
  how_to_cut="凑四句一切",
99
  top_k=5, top_p=1, temperature=1, ref_free=False
100
  )
@@ -112,12 +127,15 @@ def run_predict(text):
112
  traceback.print_exc()
113
  return None, f"💥 报错: {e}"
114
 
115
- # --- 6. 界面 ---
 
 
116
  with gr.Blocks() as app:
117
- gr.Markdown(f"### GPT-SoVITS V2 (Final CPU)")
 
118
 
119
  with gr.Row():
120
- inp = gr.Textbox(label="文本", value="这次一定行,不行我就吃键盘。")
121
  btn = gr.Button("生成")
122
 
123
  with gr.Row():
 
1
  import os
2
  import sys
3
+ import types
4
 
5
+ # ==========================================
6
+ # 1. 核心欺骗:制造“假”的 Flash Attention
7
+ # ==========================================
8
+ # 这一步必须在所有 imports 之前!
9
+ # 我们创建一个空的模块,骗过系统的 import 检查
10
+ # 但因为里面没有 functional 接口,模型会报错并回退到普通模式
11
+ dummy_module = types.ModuleType("flash_attn")
12
+ sys.modules["flash_attn"] = dummy_module
13
+ sys.modules["flash_attn.flash_attn_interface"] = dummy_module
14
+
15
+ print("💉 已注入 Flash Attention 假模块,强制开启 CPU 兼容模式。")
16
+
17
+ # ==========================================
18
+ # 2. 屏蔽 CUDA
19
+ # ==========================================
20
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
21
  import torch
22
  torch.cuda.is_available = lambda: False
23
  torch.cuda.device_count = lambda: 0
 
25
  torch.Tensor.cuda = no_op
26
  torch.nn.Module.cuda = no_op
27
 
28
+ print("💉 CUDA 已屏蔽。")
29
 
30
+ # ==========================================
31
+ # 3. 导入核心逻辑
32
+ # ==========================================
33
  sys.path.append(os.getcwd())
34
 
35
  try:
36
  import inference_webui as core
37
  print("✅ 成功导入 inference_webui")
38
 
39
+ # 强制关闭半精度
40
  if hasattr(core, "is_half"):
41
  core.is_half = False
42
+ print("✅ 强制禁用半精度")
 
 
 
 
43
 
44
  except ImportError:
45
+ print("❌ 找不到 inference_webui.py")
46
  sys.exit(1)
47
 
48
+ # ==========================================
49
+ # 4. 自动寻找模型
50
+ # ==========================================
51
  def find_real_model(pattern, search_path="."):
52
  candidates = []
53
  for root, dirs, files in os.walk(search_path):
 
65
  gpt_path = find_real_model("s1v3.ckpt") or find_real_model("s1bert")
66
  sovits_path = find_real_model("s2Gv2ProPlus.pth") or find_real_model("s2G")
67
 
68
+ # ==========================================
69
+ # 5. 加载模型
70
+ # ==========================================
71
  try:
72
  if gpt_path and sovits_path:
73
+ # 再次确保
74
  core.is_half = False
75
 
76
  if hasattr(core, "change_gpt_weights"):
77
  core.change_gpt_weights(gpt_path=gpt_path)
78
  if hasattr(core, "change_sovits_weights"):
79
  core.change_sovits_weights(sovits_path=sovits_path)
80
+ print(f"🎉 模型加载成功!(CPU模式)")
81
  else:
82
  print("❌ 未找到模型文件")
83
  except Exception as e:
84
  print(f"⚠️ 模型加载报错: {e}")
85
 
86
+ # ==========================================
87
+ # 6. 推理逻辑
88
+ # ==========================================
89
  import soundfile as sf
90
  import gradio as gr
91
  import numpy as np
92
 
93
  REF_AUDIO = "ref.wav"
94
  REF_TEXT = "你好"
95
+ REF_LANG = "中文" # 必须是中文
 
96
 
97
  def run_predict(text):
98
  if not os.path.exists(REF_AUDIO):
 
100
 
101
  print(f"📥 任务: {text}")
102
  try:
 
103
  inference_func = getattr(core, "get_tts_model", getattr(core, "get_tts_wav", None))
104
+ if not inference_func: return None, "❌ 找不到推理函数"
 
105
 
106
  # 核心调用
107
  generator = inference_func(
108
  ref_wav_path=REF_AUDIO,
109
  prompt_text=REF_TEXT,
110
+ prompt_language=REF_LANG,
111
  text=text,
112
+ text_language="中文",
113
  how_to_cut="凑四句一切",
114
  top_k=5, top_p=1, temperature=1, ref_free=False
115
  )
 
127
  traceback.print_exc()
128
  return None, f"💥 报错: {e}"
129
 
130
+ # ==========================================
131
+ # 7. 界面
132
+ # ==========================================
133
  with gr.Blocks() as app:
134
+ gr.Markdown(f"### GPT-SoVITS CPU 终极版")
135
+ gr.Markdown(f"Status: FlashAttn Disabled, CUDA Disabled, FP32 Mode")
136
 
137
  with gr.Row():
138
+ inp = gr.Textbox(label="文本", value="如果听到这句话,说明你成功了!")
139
  btn = gr.Button("生成")
140
 
141
  with gr.Row():