multimodalart HF Staff committed on
Commit
78f66e5
·
verified ·
1 Parent(s): f8f1d1d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -186,14 +186,14 @@ except Exception as e:
186
  print(f"Warning: Could not load Lightning LoRA weights: {e}")
187
  print("Continuing with base model...")
188
 
189
- spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
190
 
191
  # Apply the same optimizations from the first version
192
- # pipe.transformer.__class__ = QwenImageTransformer2DModel
193
- # pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
194
 
195
  # --- Ahead-of-time compilation ---
196
- # optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
197
 
198
  # --- UI Constants and Helpers ---
199
  MAX_SEED = np.iinfo(np.int32).max
 
186
  print(f"Warning: Could not load Lightning LoRA weights: {e}")
187
  print("Continuing with base model...")
188
 
189
+ #spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
190
 
191
  # Apply the same optimizations from the first version
192
+ pipe.transformer.__class__ = QwenImageTransformer2DModel
193
+ pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
194
 
195
  # --- Ahead-of-time compilation ---
196
+ optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
197
 
198
  # --- UI Constants and Helpers ---
199
  MAX_SEED = np.iinfo(np.int32).max