Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -333,9 +333,11 @@ scheduler_config = {
 scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
 
 # Load the model pipeline
-pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image-2512",
+pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image-2512",
+                                         # scheduler=scheduler,
+                                         torch_dtype=dtype).to(device)
 pipe.load_lora_weights(
-    "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-
+    "lightx2v/Qwen-Image-2512-Lightning", weight_name="Qwen-Image-2512-Lightning-4steps-V1.0-bf16.safetensors"
 )
 pipe.fuse_lora()
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
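In isolation, the updated loading path corresponds to a short standalone diffusers snippet like the one below. This is a minimal sketch rather than the Space's exact code: it assumes a diffusers build that ships QwenImagePipeline, a CUDA device with bfloat16 support, and it uses the pipeline's true_cfg_scale where the app exposes guidance_scale; the prompt and output filename are illustrative.

# Minimal sketch: load Qwen-Image-2512, fuse the 4-step Lightning LoRA, generate once.
import torch
from diffusers import QwenImagePipeline

pipe = QwenImagePipeline.from_pretrained(
    "Qwen/Qwen-Image-2512", torch_dtype=torch.bfloat16
).to("cuda")

# Repo and weight file names as given in the diff above.
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-2512-Lightning",
    weight_name="Qwen-Image-2512-Lightning-4steps-V1.0-bf16.safetensors",
)
pipe.fuse_lora()  # merge the LoRA deltas into the base weights so sampling pays no adapter cost

image = pipe(
    prompt="A cozy coffee shop with a chalkboard menu",  # illustrative prompt
    num_inference_steps=4,  # the Lightning LoRA is distilled for 4 sampling steps
    true_cfg_scale=1.0,     # effectively disables classifier-free guidance, matching the app's default of 1.0
).images[0]
image.save("qwen-image-2512-lightning.png")

Fusing rather than keeping the adapter attached is what lets the 4-step path run at full speed; unfuse_lora() would restore the base weights if other adapters were needed later.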
@@ -374,7 +376,7 @@ def infer(
     randomize_seed=False,
     aspect_ratio="16:9",
     guidance_scale=1.0,
-    num_inference_steps=
+    num_inference_steps=4,
     prompt_enhance=False,
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -440,7 +442,7 @@ with gr.Blocks(css=css) as demo:
         gr.HTML("""
             <div id="logo-title">
                 <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">
-                <h2 style="font-style: italic;color: #5b47d1;margin-top: -33px !important;text-align: center;">Fast,
+                <h2 style="font-style: italic;color: #5b47d1;margin-top: -33px !important;text-align: center;">Fast, 4-steps with Lightx2v Lightning LoRA</h2>
             </div>
             """)
         gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image) to run locally with ComfyUI or diffusers.")
@@ -490,7 +492,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=20,
                     step=1,
-                    value=
+                    value=4,
                 )
 
         gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
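On the UI side, that slider default is what feeds infer()'s num_inference_steps. Below is a hypothetical, pared-down reconstruction of the wiring; the real app passes many more inputs, and the infer stub here only echoes its arguments instead of running the pipeline.

# Hypothetical, pared-down wiring of the new slider default; infer is a stub
# standing in for the Space's real pipeline call.
import gradio as gr

def infer(prompt, num_inference_steps=4):
    return f"would sample {num_inference_steps} steps for: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    steps = gr.Slider(
        label="Number of inference steps",
        minimum=1,
        maximum=20,
        step=1,
        value=4,  # new default, matching the 4-step Lightning LoRA
    )
    result = gr.Textbox(label="Result")
    gr.Button("Generate").click(fn=infer, inputs=[prompt, steps], outputs=[result])

demo.launch()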