IdlecloudX committed on
Commit
5f0885c
·
verified ·
1 Parent(s): 4d9a9e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -16
app.py CHANGED
@@ -17,6 +17,7 @@ from diffusers import ZImagePipeline
17
  from diffusers.models.transformers.transformer_z_image import ZImageTransformer2DModel
18
 
19
  # ==================== Environment Variables ==================================
 
20
  MODEL_PATH = os.environ.get("MODEL_PATH", "Tongyi-MAI/Z-Image")
21
  ENABLE_COMPILE = os.environ.get("ENABLE_COMPILE", "true").lower() == "true"
22
  ENABLE_WARMUP = os.environ.get("ENABLE_WARMUP", "true").lower() == "true"
@@ -105,7 +106,6 @@ def load_models(model_path, enable_compile=False, attention_backend="native"):
105
 
106
  pipe.transformer = transformer
107
 
108
- # 尝试设置指定的 Backend,如果失败则回退
109
  try:
110
  pipe.transformer.set_attention_backend(attention_backend)
111
  except Exception as e:
@@ -121,7 +121,7 @@ def load_models(model_path, enable_compile=False, attention_backend="native"):
121
 
122
  def generate_image(pipe, prompt, negative_prompt="", width=1024, height=1024, seed=42, guidance_scale=4.0, num_inference_steps=30, shift=3.0, max_sequence_length=512, progress=gr.Progress(track_tqdm=True)):
123
  generator = torch.Generator("cuda").manual_seed(seed)
124
- # Z-Image 使用 FlowMatch
125
  scheduler = FlowMatchEulerDiscreteScheduler(num_train_timesteps=1000, shift=shift)
126
  pipe.scheduler = scheduler
127
 
@@ -134,7 +134,7 @@ def generate_image(pipe, prompt, negative_prompt="", width=1024, height=1024, se
134
  num_inference_steps=num_inference_steps,
135
  generator=generator,
136
  max_sequence_length=max_sequence_length,
137
- cfg_normalization=False, # 官方推荐
138
  ).images[0]
139
 
140
  return image
@@ -145,13 +145,13 @@ def warmup_model(pipe, resolutions):
145
  for res_str in resolutions:
146
  try:
147
  w, h = get_resolution(res_str)
148
- for i in range(1): # 减少基础模型的预热次数以节省资源
149
  generate_image(pipe, prompt=dummy_prompt, width=w, height=h, num_inference_steps=28, guidance_scale=4.0, seed=42 + i)
150
  except Exception as e:
151
  print(f"Warmup failed for {res_str}: {e}")
152
  print("Warmup completed.")
153
 
154
- # Global Pipe Variable
155
  pipe = None
156
 
157
  def init_app():
@@ -161,7 +161,7 @@ def init_app():
161
  print(f"Model loaded: {MODEL_PATH}. Compile: {ENABLE_COMPILE}, Backend: {ATTENTION_BACKEND}")
162
 
163
  if ENABLE_WARMUP:
164
- # 仅预热常用分辨率以加快启动
165
  warmup_res = ["1024x1024 ( 1:1 )"]
166
  warmup_model(pipe, warmup_res)
167
 
@@ -197,22 +197,18 @@ def generate(prompt, negative_prompt, width=1024, height=1024, seed=42, steps=30
197
 
198
  return gallery_images, str(new_seed), int(new_seed)
199
 
200
- # Initialize
201
  init_app()
202
 
203
- # ==================== AoTI Optimization ====================
204
  if pipe is not None:
205
  try:
206
- # 针对 ZeroGPU 的优化配置
207
  pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
208
  spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
209
  except Exception as e:
210
  print(f"Warning: Failed to load AoTI blocks: {e}")
211
 
212
- # ==================== UI Construction ====================
213
- custom_css = ".fillable{max-width: 1230px !important}"
214
-
215
- with gr.Blocks(title="Z-Image Demo", css=custom_css) as demo:
216
  gr.Markdown(
217
  """<div align="center">
218
  # ⚡️ Z-Image Generation Demo
@@ -223,8 +219,8 @@ with gr.Blocks(title="Z-Image Demo", css=custom_css) as demo:
223
 
224
  with gr.Row():
225
  with gr.Column(scale=1):
226
- prompt_input = gr.Textbox(label="Prompt (提示词)", lines=3, placeholder="输入你想要生成的图像描述...")
227
- negative_prompt_input = gr.Textbox(label="Negative Prompt (负面提示词)", lines=2, placeholder="输入你不想要出现在图像中的内容...")
228
 
229
  with gr.Row():
230
  width = gr.Slider(label="Width (宽)", minimum=512, maximum=2048, value=1024, step=64)
@@ -248,7 +244,7 @@ with gr.Blocks(title="Z-Image Demo", css=custom_css) as demo:
248
  output_gallery = gr.Gallery(
249
  label="Generated Images", columns=1, rows=1, height=600, object_fit="contain", format="png", interactive=False
250
  )
251
- used_seed = gr.Textbox(label="Seed Used (本次使用的种子)", interactive=False)
252
 
253
  generate_btn.click(
254
  generate,
 
17
  from diffusers.models.transformers.transformer_z_image import ZImageTransformer2DModel
18
 
19
  # ==================== Environment Variables ==================================
20
+ # 使用 Z-Image 基础模型
21
  MODEL_PATH = os.environ.get("MODEL_PATH", "Tongyi-MAI/Z-Image")
22
  ENABLE_COMPILE = os.environ.get("ENABLE_COMPILE", "true").lower() == "true"
23
  ENABLE_WARMUP = os.environ.get("ENABLE_WARMUP", "true").lower() == "true"
 
106
 
107
  pipe.transformer = transformer
108
 
 
109
  try:
110
  pipe.transformer.set_attention_backend(attention_backend)
111
  except Exception as e:
 
121
 
122
  def generate_image(pipe, prompt, negative_prompt="", width=1024, height=1024, seed=42, guidance_scale=4.0, num_inference_steps=30, shift=3.0, max_sequence_length=512, progress=gr.Progress(track_tqdm=True)):
123
  generator = torch.Generator("cuda").manual_seed(seed)
124
+ # Z-Image 使用 FlowMatch 调度器
125
  scheduler = FlowMatchEulerDiscreteScheduler(num_train_timesteps=1000, shift=shift)
126
  pipe.scheduler = scheduler
127
 
 
134
  num_inference_steps=num_inference_steps,
135
  generator=generator,
136
  max_sequence_length=max_sequence_length,
137
+ cfg_normalization=False,
138
  ).images[0]
139
 
140
  return image
 
145
  for res_str in resolutions:
146
  try:
147
  w, h = get_resolution(res_str)
148
+ for i in range(1):
149
  generate_image(pipe, prompt=dummy_prompt, width=w, height=h, num_inference_steps=28, guidance_scale=4.0, seed=42 + i)
150
  except Exception as e:
151
  print(f"Warmup failed for {res_str}: {e}")
152
  print("Warmup completed.")
153
 
154
+ # 全局变量
155
  pipe = None
156
 
157
  def init_app():
 
161
  print(f"Model loaded: {MODEL_PATH}. Compile: {ENABLE_COMPILE}, Backend: {ATTENTION_BACKEND}")
162
 
163
  if ENABLE_WARMUP:
164
+ # 仅预热基础分辨率
165
  warmup_res = ["1024x1024 ( 1:1 )"]
166
  warmup_model(pipe, warmup_res)
167
 
 
197
 
198
  return gallery_images, str(new_seed), int(new_seed)
199
 
200
+ # 初始化
201
  init_app()
202
 
203
+ # ==================== AoTI 优化 ====================
204
  if pipe is not None:
205
  try:
 
206
  pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
207
  spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
208
  except Exception as e:
209
  print(f"Warning: Failed to load AoTI blocks: {e}")
210
 
211
+ with gr.Blocks(title="Z-Image Demo") as demo:
 
 
 
212
  gr.Markdown(
213
  """<div align="center">
214
  # ⚡️ Z-Image Generation Demo
 
219
 
220
  with gr.Row():
221
  with gr.Column(scale=1):
222
+ prompt_input = gr.Textbox(label="Prompt (提示词)", lines=3, placeholder="输入图像描述...")
223
+ negative_prompt_input = gr.Textbox(label="Negative Prompt (负面提示词)", lines=2, placeholder="输入不想要出现的内容...")
224
 
225
  with gr.Row():
226
  width = gr.Slider(label="Width (宽)", minimum=512, maximum=2048, value=1024, step=64)
 
244
  output_gallery = gr.Gallery(
245
  label="Generated Images", columns=1, rows=1, height=600, object_fit="contain", format="png", interactive=False
246
  )
247
+ used_seed = gr.Textbox(label="Seed Used (使用的种子)", interactive=False)
248
 
249
  generate_btn.click(
250
  generate,