yeq6x committed on
Commit
a851ecd
·
1 Parent(s): 7449cb1

Refactor app.py to consolidate pipeline loading and adapter management into a single shared pipeline for improved efficiency and clarity

Browse files
Files changed (1) hide show
  1. app.py +14 -24
app.py CHANGED
@@ -47,37 +47,21 @@ scheduler_config = {
47
  # Initialize scheduler with Lightning config
48
  scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
49
 
50
- # Load Stage 1 pipeline (Anime -> Base Body)
51
- pipe_stage1 = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511",
52
  scheduler=scheduler,
53
  torch_dtype=dtype).to(device)
 
54
  # Load 4-step Lightning LoRA
55
- pipe_stage1.load_lora_weights(
56
  "lightx2v/Qwen-Image-Edit-2511-Lightning",
57
  weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",
58
  adapter_name="lightning"
59
  )
60
  # Load Stage 1 LoRA
61
- pipe_stage1.load_lora_weights(STAGE1_LORA_REPO, weight_name=STAGE1_LORA_WEIGHT, adapter_name="stage1")
62
- # Set both adapters with equal weights and fuse
63
- pipe_stage1.set_adapters(["lightning", "stage1"], adapter_weights=[1.0, 1.0])
64
- pipe_stage1.fuse_lora()
65
-
66
- # Load Stage 2 pipeline (Base Body -> Guide Body)
67
- pipe_stage2 = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511",
68
- scheduler=scheduler,
69
- torch_dtype=dtype).to(device)
70
- # Load 4-step Lightning LoRA
71
- pipe_stage2.load_lora_weights(
72
- "lightx2v/Qwen-Image-Edit-2511-Lightning",
73
- weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",
74
- adapter_name="lightning"
75
- )
76
  # Load Stage 2 LoRA
77
- pipe_stage2.load_lora_weights(STAGE2_LORA_REPO, weight_name=STAGE2_LORA_WEIGHT, adapter_name="stage2")
78
- # Set both adapters with equal weights and fuse
79
- pipe_stage2.set_adapters(["lightning", "stage2"], adapter_weights=[1.0, 1.0])
80
- pipe_stage2.fuse_lora()
81
 
82
  # # Apply the same optimizations from the first version
83
  # pipe.transformer.__class__ = QwenImageTransformer2DModel
@@ -149,7 +133,10 @@ def infer(
149
  print(f"Prompt: '{STAGE1_PROMPT}'")
150
  print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
151
 
152
- stage1_images = pipe_stage1(
 
 
 
153
  image=pil_images if len(pil_images) > 0 else None,
154
  prompt=STAGE1_PROMPT,
155
  height=height,
@@ -165,10 +152,13 @@ def infer(
165
  print(f"[Stage 2] Converting to guide body...")
166
  print(f"Prompt: '{STAGE2_PROMPT}'")
167
 
 
 
 
168
  # Use same seed for stage 2
169
  generator = torch.Generator(device=device).manual_seed(seed)
170
 
171
- stage2_images = pipe_stage2(
172
  image=stage1_images,
173
  prompt=STAGE2_PROMPT,
174
  height=height,
 
47
  # Initialize scheduler with Lightning config
48
  scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
49
 
50
+ # Load single shared pipeline
51
+ pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511",
52
  scheduler=scheduler,
53
  torch_dtype=dtype).to(device)
54
+ # Load all LoRAs but don't fuse yet
55
  # Load 4-step Lightning LoRA
56
+ pipe.load_lora_weights(
57
  "lightx2v/Qwen-Image-Edit-2511-Lightning",
58
  weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",
59
  adapter_name="lightning"
60
  )
61
  # Load Stage 1 LoRA
62
+ pipe.load_lora_weights(STAGE1_LORA_REPO, weight_name=STAGE1_LORA_WEIGHT, adapter_name="stage1")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  # Load Stage 2 LoRA
64
+ pipe.load_lora_weights(STAGE2_LORA_REPO, weight_name=STAGE2_LORA_WEIGHT, adapter_name="stage2")
 
 
 
65
 
66
  # # Apply the same optimizations from the first version
67
  # pipe.transformer.__class__ = QwenImageTransformer2DModel
 
133
  print(f"Prompt: '{STAGE1_PROMPT}'")
134
  print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
135
 
136
+ # Set Stage 1 adapters
137
+ pipe.set_adapters(["lightning", "stage1"], adapter_weights=[1.0, 1.0])
138
+
139
+ stage1_images = pipe(
140
  image=pil_images if len(pil_images) > 0 else None,
141
  prompt=STAGE1_PROMPT,
142
  height=height,
 
152
  print(f"[Stage 2] Converting to guide body...")
153
  print(f"Prompt: '{STAGE2_PROMPT}'")
154
 
155
+ # Set Stage 2 adapters
156
+ pipe.set_adapters(["lightning", "stage2"], adapter_weights=[1.0, 1.0])
157
+
158
  # Use same seed for stage 2
159
  generator = torch.Generator(device=device).manual_seed(seed)
160
 
161
+ stage2_images = pipe(
162
  image=stage1_images,
163
  prompt=STAGE2_PROMPT,
164
  height=height,