linoyts HF Staff commited on
Commit
8cde3cb
·
verified ·
1 Parent(s): c10249f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -6
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
  import spaces
6
 
7
  from PIL import Image
8
- from diffusers import QwenImagePipeline
9
  from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
10
  from optimization import optimize_pipeline_
11
  import os
@@ -333,8 +333,30 @@ def rewrite(input_prompt):
333
  dtype = torch.bfloat16
334
  device = "cuda" if torch.cuda.is_available() else "cpu"
335
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
336
  # Load the model pipeline
337
- pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image-2512", torch_dtype=dtype).to(device)
 
 
 
 
338
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
339
 
340
  # --- Ahead-of-time compilation ---
@@ -433,7 +455,13 @@ css = """
433
 
434
  with gr.Blocks(css=css) as demo:
435
  with gr.Column(elem_id="col-container"):
436
- gr.Markdown('<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">')
 
 
 
 
 
 
437
  gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image) to run locally with ComfyUI or diffusers.")
438
  with gr.Row():
439
  prompt = gr.Text(
@@ -473,15 +501,15 @@ with gr.Blocks(css=css) as demo:
473
  minimum=0.0,
474
  maximum=10.0,
475
  step=0.1,
476
- value=4.0,
477
  )
478
 
479
  num_inference_steps = gr.Slider(
480
  label="Number of inference steps",
481
  minimum=1,
482
- maximum=50,
483
  step=1,
484
- value=50,
485
  )
486
 
487
  gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
 
5
  import spaces
6
 
7
  from PIL import Image
8
+ from diffusers import QwenImagePipeline, FlowMatchEulerDiscreteScheduler
9
  from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
10
  from optimization import optimize_pipeline_
11
  import os
 
333
  dtype = torch.bfloat16
334
  device = "cuda" if torch.cuda.is_available() else "cpu"
335
 
336
+ scheduler_config = {
337
+ "base_image_seq_len": 256,
338
+ "base_shift": math.log(3), # We use shift=3 in distillation
339
+ "invert_sigmas": False,
340
+ "max_image_seq_len": 8192,
341
+ "max_shift": math.log(3), # We use shift=3 in distillation
342
+ "num_train_timesteps": 1000,
343
+ "shift": 1.0,
344
+ "shift_terminal": None, # set shift_terminal to None
345
+ "stochastic_sampling": False,
346
+ "time_shift_type": "exponential",
347
+ "use_beta_sigmas": False,
348
+ "use_dynamic_shifting": True,
349
+ "use_exponential_sigmas": False,
350
+ "use_karras_sigmas": False,
351
+ }
352
+ scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
353
+
354
  # Load the model pipeline
355
+ pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image-2512", scheduler=scheduler, torch_dtype=dtype).to(device)
356
+ pipe.load_lora_weights(
357
+ "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors"
358
+ )
359
+ pipe.fuse_lora()
360
  pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
361
 
362
  # --- Ahead-of-time compilation ---
 
455
 
456
  with gr.Blocks(css=css) as demo:
457
  with gr.Column(elem_id="col-container"):
458
+ #gr.Markdown('<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">')
459
+ gr.HTML("""
460
+ <div id="logo-title">
461
+ <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_logo.png" alt="Qwen-Image Logo" width="400" style="display: block; margin: 0 auto;">
462
+ <h2 style="font-style: italic;color: #5b47d1;margin-top: -33px !important;margin-left: 133px;">Fast, 4-steps with Lightning LoRA</h2>
463
+ </div>
464
+ """)
465
  gr.Markdown("[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series. Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image) to run locally with ComfyUI or diffusers.")
466
  with gr.Row():
467
  prompt = gr.Text(
 
501
  minimum=0.0,
502
  maximum=10.0,
503
  step=0.1,
504
+ value=1.0,
505
  )
506
 
507
  num_inference_steps = gr.Slider(
508
  label="Number of inference steps",
509
  minimum=1,
510
+ maximum=20,
511
  step=1,
512
+ value=4,
513
  )
514
 
515
  gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)