multimodalart (HF Staff) committed
Commit 836f311 · verified · 1 Parent(s): f4b8086

Update app.py

Files changed (1):
  1. app.py (+2 -10)
app.py CHANGED

@@ -7,9 +7,7 @@ import spaces
 from PIL import Image
 from diffusers import FlowMatchEulerDiscreteScheduler
 from optimization import optimize_pipeline_
-from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
-from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
-from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+from diffusers import QwenImageEditPlusPipeline
 
 import math
 from huggingface_hub import hf_hub_download
@@ -27,11 +25,6 @@ from typing import Optional, Tuple, Any
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = QwenImageEditPlusPipeline.from_pretrained(
-    "Qwen/Qwen-Image-Edit-2509",
-    torch_dtype=dtype
-).to(device)
-
 scheduler_config = {
     "base_image_seq_len": 256,
     "base_shift": math.log(3),
@@ -77,8 +70,7 @@ pipe.unload_lora_weights()
 
 #spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
 
-pipe.transformer.__class__ = QwenImageTransformer2DModel
-pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+pipe.transformer.set_attention_backend("_flash_3_hub")
 
 optimize_pipeline_(
     pipe,
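
In short, the commit drops the vendored qwenimage modules in favor of the upstream diffusers QwenImageEditPlusPipeline, and swaps the manual transformer class swap plus custom FA3 attention processor for diffusers' attention-backend API. Below is a minimal sketch of the resulting setup, assuming a diffusers release that ships both QwenImageEditPlusPipeline and set_attention_backend, plus the kernels package so the "_flash_3_hub" backend can fetch a FlashAttention-3 kernel from the Hub. The from_pretrained call deleted in the second hunk is re-inlined here so the sketch is self-contained; in app.py itself the pipeline is presumably constructed elsewhere.

import torch
from diffusers import QwenImageEditPlusPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Upstream pipeline instead of the local qwenimage fork
# (re-inlined from the removed hunk for illustration).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    torch_dtype=dtype,
).to(device)

# One call replaces the old class swap + QwenDoubleStreamAttnProcessorFA3:
# the "_flash_3_hub" backend resolves a FlashAttention-3 kernel from the Hub.
pipe.transformer.set_attention_backend("_flash_3_hub")

With the backend dispatched by diffusers itself, the Space no longer needs to carry the vendored pipeline_qwenimage_edit_plus, transformer_qwenimage, or qwen_fa3_processor modules, which is what the removed imports in the first hunk reflect.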