simler committed on
Commit
5af2d24
·
verified ·
1 Parent(s): d6136a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -79
app.py CHANGED
@@ -1,64 +1,9 @@
1
  import os
2
  import sys
 
3
 
4
  # ==========================================
5
- # 1. 核心补丁:全局搜索并摧毁 Flash Attention
6
- # ==========================================
7
- print("🛰️ 启动全盘扫描,寻找 GPU 毒瘤文件...")
8
-
9
- # 定义我们要写入的“CPU 伪装代码”
10
- # 这段代码会把所有对 GPU 模型的调用,转发给 CPU 模型
11
- CPU_HACK_CODE = """
12
- import sys
13
- import logging
14
- print("🛡️ [HACK] 成功拦截 FlashAttn 调用,强制重定向至 CPU 模型!")
15
-
16
- # 试图导入普通的 CPU 模型
17
- try:
18
- # 尝试多种可能的路径
19
- try:
20
- from AR.models.t2s_model import Text2SemanticDecoder
21
- except ImportError:
22
- from GPT_SoVITS.AR.models.t2s_model import Text2SemanticDecoder
23
- except ImportError:
24
- # 如果都找不到,手动定义一个空的 CPU 兼容类(防止报错)
25
- import torch.nn as nn
26
- class Text2SemanticDecoder(nn.Module):
27
- def __init__(self, *args, **kwargs):
28
- super().__init__()
29
- print("⚠️ 使用了紧急备用 CPU Decoder 类")
30
- def forward(self, *args, **kwargs):
31
- print("⚠️ 紧急备用 Forward 被调用")
32
- return None
33
-
34
- # 导出这个类,骗过调用者
35
- Text2SemanticDecoderFlashAttn = Text2SemanticDecoder
36
- """
37
-
38
- # 递归搜索并覆盖
39
- target_filename = "t2s_model_flash_attn.py"
40
- hacked_count = 0
41
-
42
- for root, dirs, files in os.walk("."):
43
- if target_filename in files:
44
- full_path = os.path.join(root, target_filename)
45
- print(f"🎯 锁定目标: {full_path}")
46
-
47
- try:
48
- with open(full_path, "w", encoding="utf-8") as f:
49
- f.write(CPU_HACK_CODE)
50
- print("✅ 已执行物理覆盖 (写入 CPU 伪装代码)")
51
- hacked_count += 1
52
- except Exception as e:
53
- print(f"❌ 覆盖失败: {e}")
54
-
55
- if hacked_count == 0:
56
- print("⚠️ 警告:未找到任何 FlashAttn 文件!可能目录结构极其特殊。")
57
- else:
58
- print(f"🎉 成功处理了 {hacked_count} 个 GPU 文件。")
59
-
60
- # ==========================================
61
- # 2. 基础环境设置
62
  # ==========================================
63
  os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
64
  import torch
@@ -67,9 +12,50 @@ torch.cuda.device_count = lambda: 0
67
  def no_op(self, *args, **kwargs): return self
68
  torch.Tensor.cuda = no_op
69
  torch.nn.Module.cuda = no_op
 
70
 
71
  # ==========================================
72
- # 3. 导入推理核心
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  # ==========================================
74
  sys.path.append(os.getcwd())
75
 
@@ -77,49 +63,51 @@ try:
77
  import inference_webui as core
78
  print("✅ 成功导入 inference_webui")
79
 
 
80
  if hasattr(core, "is_half"): core.is_half = False
81
  if hasattr(core, "device"): core.device = "cpu"
82
 
83
- except ImportError:
84
- print("❌ 找不到 inference_webui.py")
 
 
85
  sys.exit(1)
86
 
87
  # ==========================================
88
- # 4. 自动寻找模型文件
89
  # ==========================================
90
- def find_real_model(pattern, search_path="."):
91
- candidates = []
92
- for root, dirs, files in os.walk(search_path):
93
  for file in files:
94
  if pattern in file and not file.endswith(".lock") and not file.endswith(".metadata"):
95
  path = os.path.join(root, file)
96
  size_mb = os.path.getsize(path) / (1024 * 1024)
97
- if size_mb > 10:
98
- candidates.append((path, size_mb))
99
- if candidates:
100
- candidates.sort(key=lambda x: x[1], reverse=True)
101
- return candidates[0][0]
102
  return None
103
 
104
- gpt_path = find_real_model("s1v3.ckpt") or find_real_model("s1bert")
105
- sovits_path = find_real_model("s2Gv2ProPlus.pth") or find_real_model("s2G")
106
 
107
  # ==========================================
108
  # 5. 加载模型
109
  # ==========================================
110
  try:
111
  if gpt_path and sovits_path:
112
- core.is_half = False
113
- if hasattr(core, "change_gpt_weights"): core.change_gpt_weights(gpt_path=gpt_path)
114
- if hasattr(core, "change_sovits_weights"): core.change_sovits_weights(sovits_path=sovits_path)
115
- print(f"🎉 模型加载成功!")
 
 
 
 
116
  else:
117
  print("❌ 未找到模型文件")
118
  except Exception as e:
119
  print(f"⚠️ 模型加载报错: {e}")
120
 
121
  # ==========================================
122
- # 6. 推理接口
123
  # ==========================================
124
  import soundfile as sf
125
  import gradio as gr
@@ -127,7 +115,7 @@ import numpy as np
127
 
128
  REF_AUDIO = "ref.wav"
129
  REF_TEXT = "你好"
130
- REF_LANG = "中文"
131
 
132
  def run_predict(text):
133
  if not os.path.exists(REF_AUDIO): return None, "❌ 请上传 ref.wav"
@@ -152,6 +140,7 @@ def run_predict(text):
152
  sr, data = result_list[0]
153
  out_path = f"out_{os.urandom(4).hex()}.wav"
154
  sf.write(out_path, data, sr)
 
155
  return out_path, "✅ 成功"
156
 
157
  except Exception as e:
@@ -163,13 +152,16 @@ def run_predict(text):
163
  # 7. 启动界面
164
  # ==========================================
165
  with gr.Blocks() as app:
166
- gr.Markdown(f"### GPT-SoVITS V2 (CPU HACKED)")
 
167
  with gr.Row():
168
- inp = gr.Textbox(label="文本", value="这下舒服了,终于跑通了。")
169
  btn = gr.Button("生成")
 
170
  with gr.Row():
171
  out = gr.Audio(label="结果")
172
  log = gr.Textbox(label="日志")
 
173
  btn.click(run_predict, [inp], [out, log], api_name="predict")
174
 
175
  if __name__ == "__main__":
 
1
  import os
2
  import sys
3
+ import shutil
4
 
5
  # ==========================================
6
+ # 1. 净化环境
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  # ==========================================
8
  os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
9
  import torch
 
12
  def no_op(self, *args, **kwargs): return self
13
  torch.Tensor.cuda = no_op
14
  torch.nn.Module.cuda = no_op
15
+ print("💉 CUDA 已屏蔽")
16
 
17
  # ==========================================
18
+ # 2. 物理层克隆:用 CPU 代码覆盖 GPU 代码
19
+ # ==========================================
20
+ print("🧬 启动代码克隆手术...")
21
+
22
def find_file(name, search_path="."):
    """Recursively locate a file called *name* under *search_path*.

    Returns the full path of the first directory-walk match, or None
    when no file with that exact name exists anywhere in the tree.
    """
    for folder, _subdirs, filenames in os.walk(search_path):
        if name in filenames:
            return os.path.join(folder, name)
    return None
27
+
28
# 1. Locate the source module (plain CPU implementation).
cpu_src = find_file("t2s_model.py")
# 2. Locate the destination module (GPU / flash-attention implementation).
gpu_dst = find_file("t2s_model_flash_attn.py")

if cpu_src and gpu_dst:
    try:
        # Read the CPU implementation ...
        with open(cpu_src, "r", encoding="utf-8") as src_handle:
            cpu_code = src_handle.read()

        # ... and physically overwrite the GPU module with it, so any
        # import of the flash-attn module actually runs the CPU code.
        with open(gpu_dst, "w", encoding="utf-8") as dst_handle:
            dst_handle.write(cpu_code)

        print(f"✅ 手术成功!\n源: {cpu_src}\n目标: {gpu_dst}")
        print("现在系统加载 FlashAttn 模块时,实际上运行的是纯 CPU 代码。")

        # Drop stale bytecode so the import machinery re-reads the
        # freshly overwritten source file instead of a cached .pyc.
        cache_dir = os.path.join(os.path.dirname(gpu_dst), "__pycache__")
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
            print("🧹 已清理 __pycache__")

    except Exception as e:
        print(f"❌ 文件操作失败: {e}")
else:
    print(f"⚠️ 未找到关键代码文件 (src:{cpu_src}, dst:{gpu_dst})")
56
+
57
+ # ==========================================
58
+ # 3. 导入核心逻辑
59
  # ==========================================
60
  sys.path.append(os.getcwd())
61
 
 
63
  import inference_webui as core
64
  print("✅ 成功导入 inference_webui")
65
 
66
+ # 强制修改配置
67
  if hasattr(core, "is_half"): core.is_half = False
68
  if hasattr(core, "device"): core.device = "cpu"
69
 
70
+ except Exception as e:
71
+ print(f"❌ 导入失败: {e}")
72
+ # 打印一下当前目录,方便排查
73
+ print(f"当前目录文件: {os.listdir('.')}")
74
  sys.exit(1)
75
 
76
  # ==========================================
77
+ # 4. 自动寻找模型
78
  # ==========================================
79
def find_model_file(pattern, search_path="."):
    """Find the first plausible model checkpoint whose filename contains *pattern*.

    Walks *search_path* (default: current directory, preserving the previous
    hard-coded behavior) recursively, skipping HuggingFace cache bookkeeping
    files (``*.lock``, ``*.metadata``), and returns the first match larger
    than 10 MB.  Smaller files are assumed to be stubs or partial downloads.
    Returns ``None`` when nothing qualifies.
    """
    for root, _dirs, files in os.walk(search_path):
        for file in files:
            # endswith() accepts a tuple: one call covers both cache suffixes.
            if pattern in file and not file.endswith((".lock", ".metadata")):
                path = os.path.join(root, file)
                size_mb = os.path.getsize(path) / (1024 * 1024)
                # Real GPT/SoVITS weights are far larger than 10 MB.
                if size_mb > 10:
                    return path
    return None
87
 
88
+ gpt_path = find_model_file("s1v3.ckpt") or find_model_file("s1bert")
89
+ sovits_path = find_model_file("s2Gv2ProPlus.pth") or find_model_file("s2G")
90
 
91
  # ==========================================
92
  # 5. 加载模型
93
  # ==========================================
94
try:
    if not (gpt_path and sovits_path):
        print("❌ 未找到模型文件")
    else:
        # Force full precision once more right before loading the weights.
        core.is_half = False

        # Table-driven dispatch: call each loader only when the imported
        # webui module actually exposes it.
        for attr_name, loader_kwargs in (
            ("change_gpt_weights", {"gpt_path": gpt_path}),
            ("change_sovits_weights", {"sovits_path": sovits_path}),
        ):
            if hasattr(core, attr_name):
                getattr(core, attr_name)(**loader_kwargs)

        print("🎉 模型加载成功!(CPU Mode)")
except Exception as e:
    # Loading is best-effort: report the failure and let the UI start anyway.
    print(f"⚠️ 模型加载报错: {e}")
108
 
109
  # ==========================================
110
+ # 6. 推理逻辑
111
  # ==========================================
112
  import soundfile as sf
113
  import gradio as gr
 
115
 
116
  REF_AUDIO = "ref.wav"
117
  REF_TEXT = "你好"
118
+ REF_LANG = "中文"
119
 
120
  def run_predict(text):
121
  if not os.path.exists(REF_AUDIO): return None, "❌ 请上传 ref.wav"
 
140
  sr, data = result_list[0]
141
  out_path = f"out_{os.urandom(4).hex()}.wav"
142
  sf.write(out_path, data, sr)
143
+ print(f"✅ 生成完毕: {out_path}")
144
  return out_path, "✅ 成功"
145
 
146
  except Exception as e:
 
152
  # 7. 启动界面
153
  # ==========================================
154
with gr.Blocks() as app:
    # Plain string: the original used an f-string with no placeholders (F541).
    gr.Markdown("### GPT-SoVITS V2 (CPU Clone Edition)")

    # Input row: text to synthesize plus the trigger button.
    with gr.Row():
        inp = gr.Textbox(label="文本", value="这一波,稳了。")
        btn = gr.Button("生成")

    # Output row: generated audio and a human-readable status log.
    with gr.Row():
        out = gr.Audio(label="结果")
        log = gr.Textbox(label="日志")

    # Wire the button to inference; api_name also exposes it as the
    # "predict" endpoint for programmatic clients.
    btn.click(run_predict, [inp], [out, log], api_name="predict")
166
 
167
  if __name__ == "__main__":