Ayan11451 commited on
Commit
a22170a
·
verified ·
1 Parent(s): de8b5fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -153
app.py CHANGED
@@ -1,291 +1,198 @@
1
  import sys, subprocess
2
 
3
-
4
-
5
  # 確保依賴套件安裝
6
-
7
  subprocess.run(
8
-
9
  [sys.executable, "-m", "pip", "install", "--quiet", "--no-deps", "--upgrade", "huggingface-hub==0.25.2"],
10
-
11
  check=False
12
-
13
  )
14
 
15
-
16
-
17
  try:
18
-
19
  import huggingface_hub
20
-
21
  print("huggingface-hub pinned to:", huggingface_hub.__version__)
22
-
23
  except Exception as e:
24
-
25
  print("check hub version failed:", e)
26
 
27
-
28
-
29
  import os, shutil, pathlib, time
30
 
31
-
32
-
33
  # ===== 1) 環境設定 =====
34
-
35
  os.environ.setdefault("TMPDIR", "/tmp")
36
-
37
  os.environ.setdefault("HF_HOME", "/tmp/.huggingface")
38
-
39
  os.environ.setdefault("HF_HUB_CACHE", os.path.join(os.environ["HF_HOME"], "hub"))
40
-
41
  os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
42
-
43
  os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "false")
44
 
45
-
46
-
47
  for p in ["/home/user/.cache/pip", "/root/.cache/pip"]:
48
-
49
  shutil.rmtree(p, ignore_errors=True)
50
 
51
-
52
-
53
  pathlib.Path(os.environ["HF_HOME"]).mkdir(parents=True, exist_ok=True)
54
 
55
-
56
-
57
  # ===== 2) 載入模型 =====
58
-
59
  import gradio as gr
60
-
61
  import torch
62
-
63
  from diffusers import StableDiffusionPipeline
64
 
65
-
66
-
67
  device = "cuda" if torch.cuda.is_available() else "cpu"
68
-
69
  torch_dtype = torch.float16 if device == "cuda" else torch.float32
70
 
71
-
72
-
73
  pipe = StableDiffusionPipeline.from_pretrained(
74
-
75
  "hakurei/waifu-diffusion",
76
-
77
  torch_dtype=torch_dtype,
78
-
79
  safety_checker=None,
80
-
81
  cache_dir=os.environ["HF_HOME"],
82
-
83
  use_safetensors=True
84
-
85
  ).to(device)
86
 
87
-
88
-
89
  # 省顯存設定
90
-
91
  if torch.cuda.is_available():
92
-
93
  for fn in ("enable_attention_slicing", "enable_vae_tiling", "enable_model_cpu_offload"):
94
-
95
  try:
96
-
97
  getattr(pipe, fn)()
98
-
99
  except Exception:
100
-
101
  pass
102
-
103
  else:
104
-
105
  try:
106
-
107
  pipe.enable_attention_slicing()
108
-
109
  except Exception:
110
-
111
  pass
112
-
113
  pipe.to("cpu")
114
 
115
-
116
-
117
  print(f"✅ Model loaded on {device}")
118
 
119
-
120
-
121
- # ===== 3) 生成函式 (先定義基礎函式) =====
122
-
123
  def generate_image(prompt: str):
124
-
125
  prompt = (prompt or "").strip()
126
-
127
  t0 = time.time()
128
-
129
 
130
-
131
  negative_prompt = (
132
-
133
  "low quality, worst quality, blurry, deformed, extra limbs, "
134
-
135
  "extra fingers, missing fingers, bad anatomy, bad hands, arms, "
136
-
137
  "text, watermark, username"
138
-
139
  )
140
 
141
-
142
-
143
  result = pipe(
144
-
145
  prompt=prompt,
146
-
147
  negative_prompt=negative_prompt,
148
-
149
  width=512, height=512,
150
-
151
  num_inference_steps=28,
152
-
153
  guidance_scale=7.0
154
-
155
  )
156
-
157
  img = result.images[0]
158
-
159
  del result
160
-
161
 
162
-
163
  try:
164
-
165
  import gc
166
-
167
  gc.collect()
168
-
169
  if device == "cuda":
170
-
171
  torch.cuda.empty_cache()
172
-
173
  except Exception:
174
-
175
  pass
176
-
177
 
178
-
179
  elapsed = time.time() - t0
180
-
181
  print(f"✅ Image generated in {elapsed:.1f}s")
182
-
183
  return img
184
 
185
-
186
-
187
- # ===== 4) 包裝函式 (在基礎函式之後定義) =====
188
-
189
  def generate_with_timeout_warning(prompt):
190
-
191
  """包裝生成函式,加入錯誤處理"""
192
-
193
  try:
194
-
195
  return generate_image(prompt)
196
-
197
  except Exception as e:
198
-
199
  if "timeout" in str(e).lower():
200
-
201
  raise gr.Error("⏱️ 生成超時。")
202
-
203
  raise gr.Error(f"生成失敗: {str(e)}")
204
 
205
-
206
-
207
- # ===== 5) 提示詞模板 =====
208
-
209
  default_prompt = "(masterpiece, best quality:1.3), 1girl, cute, long silver hair, red eyes, smiling, gentle sunlight, (headshot:1.6), (extreme close-up:1.5), (face focus:1.5), face only portrait, shoulders up only, cropped image, highly detailed face and eyes, (sakura trees, japanese town blurred background), soft lighting, intricate details"
210
 
211
-
212
-
213
  macro_prompt = "(masterpiece, best quality:1.3), 1girl, cute, long silver hair, red eyes, smiling, gentle sunlight, (macro lens portrait:1.4), (shallow depth of field:1.5), focus on eyes, sharp focus face, bokeh background, (sakura trees, japanese town), soft lighting, intricate details, head and shoulders portrait"
214
 
215
-
216
-
217
- # ===== 6) Gradio 介面 (只定義一次!) =====
218
-
219
- # 根據硬體動態調整警告訊息
220
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  if device == "cpu":
222
-
223
  title = "Waifu Diffusion 1:1 動漫風大頭照生成器 (CPU 模式)"
224
-
225
  description = """
226
-
227
- **提醒**: 當前使用 CPU 運算,單張圖片需要 **5-15 分鐘**
228
 
229
- 點擊下方 Examples 可載入預設模板,記得按下【Submit】開始生成
230
-
231
  """
232
-
233
  else:
234
-
235
  title = "Waifu Diffusion 1:1 動漫風大頭照生成器 (GPU 模式)"
236
-
237
- description = "點擊下方 Examples 可載入預設模板,記得按下【Submit】開始生成。預計 10-30 秒完成。"
238
-
239
-
240
 
241
  demo = gr.Interface(
242
-
243
  fn=generate_with_timeout_warning, # 使用包裝後的函式
244
-
245
  inputs=gr.Textbox(
246
-
247
  label="輸入提示詞 (Prompt)",
248
-
249
  value=default_prompt,
250
-
251
  lines=5,
252
-
253
  placeholder="在此輸入英文提示詞..."
254
-
255
  ),
256
-
257
  outputs=gr.Image(label="生成結果 (Image)", type="pil", format="png"),
258
-
259
  title=title,
260
-
261
  description=description,
262
-
263
  examples=[
264
-
265
  [default_prompt],
266
-
267
  [macro_prompt],
268
-
269
  ],
270
-
271
- cache_examples=False
272
-
273
  ).queue()
274
 
275
-
276
-
277
  # ===== 7) 啟動服務 =====
278
-
279
  if __name__ == "__main__":
280
-
281
  demo.launch(
282
-
283
  server_name="0.0.0.0",
284
-
285
  server_port=7860,
286
-
287
  share=False,
288
-
289
  max_threads=1
290
-
291
  )
 
# Bootstrap: pin huggingface-hub before anything else imports it.
import sys, subprocess

# Ensure the pinned dependency is installed. Best-effort on purpose:
# check=False so a pip failure does not abort startup, and --no-deps
# keeps pip from touching any other installed package.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--quiet", "--no-deps", "--upgrade", "huggingface-hub==0.25.2"],
    check=False
)

try:
    import huggingface_hub
    print("huggingface-hub pinned to:", huggingface_hub.__version__)
except Exception as e:
    # Version check is informational only; never fatal.
    print("check hub version failed:", e)

import os, shutil, pathlib, time

# ===== 1) Environment setup =====
# Route temp files and all HF caches to /tmp (writable on HF Spaces).
# setdefault keeps any values already provided by the host environment.
os.environ.setdefault("TMPDIR", "/tmp")
os.environ.setdefault("HF_HOME", "/tmp/.huggingface")
os.environ.setdefault("HF_HUB_CACHE", os.path.join(os.environ["HF_HOME"], "hub"))
os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "false")

# Reclaim disk space: drop pip's download caches (ignore if absent).
for p in ["/home/user/.cache/pip", "/root/.cache/pip"]:
    shutil.rmtree(p, ignore_errors=True)

pathlib.Path(os.environ["HF_HOME"]).mkdir(parents=True, exist_ok=True)

# ===== 2) Load the model =====
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    "hakurei/waifu-diffusion",
    torch_dtype=torch_dtype,
    safety_checker=None,  # safety checker disabled to save memory/load time
    cache_dir=os.environ["HF_HOME"],
    use_safetensors=True
).to(device)

# Memory-saving settings. Each call is best-effort because older
# diffusers versions may not expose all of these methods.
if torch.cuda.is_available():
    for fn in ("enable_attention_slicing", "enable_vae_tiling", "enable_model_cpu_offload"):
        try:
            getattr(pipe, fn)()
        except Exception:
            pass
else:
    try:
        pipe.enable_attention_slicing()
    except Exception:
        pass
    pipe.to("cpu")

print(f"✅ Model loaded on {device}")
61
# ===== 3) Generation function =====
def generate_image(prompt: str):
    """Run one text-to-image pass and return the resulting PIL image.

    Relies on the module-level ``pipe``/``device`` configured at import
    time; a fixed negative prompt steers away from common artifacts.
    """
    started = time.time()
    cleaned = (prompt or "").strip()

    anti_prompt = (
        "low quality, worst quality, blurry, deformed, extra limbs, "
        "extra fingers, missing fingers, bad anatomy, bad hands, arms, "
        "text, watermark, username"
    )

    output = pipe(
        prompt=cleaned,
        negative_prompt=anti_prompt,
        width=512, height=512,
        num_inference_steps=28,
        guidance_scale=7.0
    )
    image = output.images[0]
    del output  # release the pipeline result before reclaiming memory

    # Best-effort memory cleanup between requests; never let it fail a run.
    try:
        import gc
        gc.collect()
        if device == "cuda":
            torch.cuda.empty_cache()
    except Exception:
        pass

    print(f"✅ Image generated in {time.time() - started:.1f}s")
    return image
93
 
 
 
 
 
94
def generate_with_timeout_warning(prompt):
    """Wrap generate_image, converting any failure into a user-facing gr.Error."""
    try:
        result = generate_image(prompt)
    except Exception as exc:
        message = str(exc)
        # Timeouts get a dedicated, shorter message.
        if "timeout" in message.lower():
            raise gr.Error("⏱️ 生成超時。")
        raise gr.Error(f"生成失敗: {message}")
    return result
102
 
103
# ===== 4) Prompt templates =====
# Default example: extreme close-up headshot composition.
default_prompt = "(masterpiece, best quality:1.3), 1girl, cute, long silver hair, red eyes, smiling, gentle sunlight, (headshot:1.6), (extreme close-up:1.5), (face focus:1.5), face only portrait, shoulders up only, cropped image, highly detailed face and eyes, (sakura trees, japanese town blurred background), soft lighting, intricate details"

# Alternate example: macro-lens portrait with shallow depth of field.
macro_prompt = "(masterpiece, best quality:1.3), 1girl, cute, long silver hair, red eyes, smiling, gentle sunlight, (macro lens portrait:1.4), (shallow depth of field:1.5), focus on eyes, sharp focus face, bokeh background, (sakura trees, japanese town), soft lighting, intricate details, head and shoulders portrait"
107
 
108
# ===== 5) Custom CSS and JavaScript (disable right-click and devtools shortcuts) =====
# NOTE(review): this is client-side only and trivially bypassed (e.g. by
# opening devtools before load, or disabling JS) — a deterrent, not protection.
custom_js = """
function() {
    // 禁用右鍵選單
    document.addEventListener('contextmenu', function(e) {
        e.preventDefault();
        return false;
    }, false);

    // 禁用 F12、Ctrl+Shift+I、Ctrl+U 等快捷鍵
    document.addEventListener('keydown', function(e) {
        if (e.keyCode == 123 || // F12
            (e.ctrlKey && e.shiftKey && e.keyCode == 73) || // Ctrl+Shift+I
            (e.ctrlKey && e.shiftKey && e.keyCode == 74) || // Ctrl+Shift+J
            (e.ctrlKey && e.keyCode == 85)) { // Ctrl+U
            e.preventDefault();
            return false;
        }
    });

    // 禁用文字選取和複製
    document.addEventListener('selectstart', function(e) {
        e.preventDefault();
        return false;
    });

    document.addEventListener('copy', function(e) {
        e.preventDefault();
        return false;
    });
}
"""

# CSS: disable text selection globally, but keep it enabled inside
# input fields so users can still edit the prompt.
custom_css = """
/* 禁用使用者選取文字 */
* {
    -webkit-user-select: none;
    -moz-user-select: none;
    -ms-user-select: none;
    user-select: none;
}

/* 保留輸入框可以選取文字 */
input, textarea {
    -webkit-user-select: text !important;
    -moz-user-select: text !important;
    -ms-user-select: text !important;
    user-select: text !important;
}
"""
158
+
159
# ===== 6) Gradio interface =====
# Title and description adapt to the detected hardware so users know
# what latency to expect.
if device == "cpu":
    title = "Waifu Diffusion 1:1 動漫風大頭照生成器 (CPU 模式)"
    description = """
**提醒**: 當前使用雲端 CPU 運算,單張圖片需要 **5-15 分鐘**

點擊下方 Examples 可載入預設模板,記得按下【Submit】開始生成
"""
else:
    title = "Waifu Diffusion 1:1 動漫風大頭照生成器 (GPU 模式)"
    description = "點擊下方 Examples 可載入預設模板,記得按下【Submit】開始生成。預計 10-30 秒完成。"

demo = gr.Interface(
    fn=generate_with_timeout_warning,  # wrapped generator with error handling
    inputs=gr.Textbox(
        label="輸入提示詞 (Prompt)",
        value=default_prompt,
        lines=5,
        placeholder="在此輸入英文提示詞..."
    ),
    outputs=gr.Image(label="生成結果 (Image)", type="pil", format="png"),
    title=title,
    description=description,
    examples=[
        [default_prompt],
        [macro_prompt],
    ],
    cache_examples=False,  # avoid running the pipeline on examples at startup
    # NOTE(review): the css=/js= parameters on gr.Interface require a
    # sufficiently recent Gradio (4.x) — confirm the Space's pinned version.
    css=custom_css,
    js=custom_js
).queue()
190
 
 
 
191
# ===== 7) Start the server =====
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (required on HF Spaces)
        server_port=7860,       # standard HF Spaces port
        share=False,
        max_threads=1           # serialize requests: one generation at a time
    )