prithivMLmods committed
Commit 72c4879 · verified · Parent(s): e603549

update app

Files changed (1):
  1. app.py +10 -36
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import gradio as gr
 import numpy as np
+import spaces
 import torch
 import random
 from PIL import Image
@@ -8,18 +9,6 @@ from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
-# --- Handle optional 'spaces' import for local compatibility ---
-try:
-    import spaces
-except ImportError:
-    class spaces:
-        @staticmethod
-        def GPU(duration=60):
-            def decorator(func):
-                return func
-            return decorator
-
-# --- Custom Theme Setup ---
 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",
@@ -88,17 +77,14 @@ class SteelBlueTheme(Soft):
 
 steel_blue_theme = SteelBlueTheme()
 
-# --- Hardware Setup ---
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 dtype = torch.bfloat16
 
-# --- Imports for Custom Pipeline ---
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
-# --- Model Initialization ---
 print("Loading Qwen Image Edit Pipeline...")
 pipe = QwenImageEditPlusPipeline.from_pretrained(
     "Qwen/Qwen-Image-Edit-2509",
@@ -111,32 +97,26 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
     torch_dtype=dtype
 ).to(device)
 
-# 1. Load and Fuse Lightning (for speed)
 print("Loading and Fusing Lightning LoRA...")
 pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
     weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
     adapter_name="lightning")
 pipe.fuse_lora(adapter_names=["lightning"], lora_scale=1.0)
 
-# 2. Load Task Specific LoRAs
 print("Loading Task Adapters...")
 
-# Texture
 pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
     weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
     adapter_name="texture")
 
-# Fusion (Fuse-Objects)
 pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
     weight_name="溶图.safetensors",
     adapter_name="fusion")
 
-# Face Swap
 pipe.load_lora_weights("Alissonerdx/BFS-Best-Face-Swap",
     weight_name="bfs_head_v3_qwen_image_edit_2509.safetensors",
     adapter_name="faceswap")
 
-# Attempt Flash Attention 3
 try:
     pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
     print("Flash Attention 3 Processor set successfully.")
@@ -166,7 +146,7 @@ def update_dimensions_on_upload(image):
 
     return new_width, new_height
 
-@spaces.GPU(duration=60)
+@spaces.GPU(duration=30)
 def infer(
     image_1,
     image_2,
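update_dimensions_on_upload itself is untouched by this commit and its body is not shown. A typical implementation of such a helper, as a sketch under assumed conventions (hypothetical name, ~1024 px on the long side, sides snapped to multiples of 64):

def compute_edit_dims(image, target=1024, multiple=64):
    # scale the longer side to roughly `target`, preserving aspect ratio
    w, h = image.size
    scale = target / max(w, h)
    # snap both sides down to the assumed latent granularity
    new_width = max(multiple, int(w * scale) // multiple * multiple)
    new_height = max(multiple, int(h * scale) // multiple * multiple)
    return new_width, new_height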
@@ -182,7 +162,6 @@ def infer(
         raise gr.Error("Please upload both images for Fusion/Texture/FaceSwap tasks.")
 
     if not prompt:
-        # Add default prompts based on mode if user leaves empty (optional helper)
         if lora_adapter == "Face-Swap":
             prompt = "Swap the face."
         elif lora_adapter == "Texture Edit":
@@ -190,8 +169,6 @@ def infer(
         elif lora_adapter == "Fuse-Objects":
             prompt = "Fuse object into background."
 
-    # Switch Adapters
-    # Note: Lightning is already fused, so we just enable the style adapter
     adapters_map = {
        "Texture Edit": "texture",
        "Fuse-Objects": "fusion",
@@ -211,15 +188,13 @@ def infer(
     generator = torch.Generator(device=device).manual_seed(seed)
     negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
-    # Prepare Images
     img1_pil = image_1.convert("RGB")
     img2_pil = image_2.convert("RGB")
 
-    # Calculate dimensions based on the primary image (Image 1)
     width, height = update_dimensions_on_upload(img1_pil)
 
     result = pipe(
-        image=[img1_pil, img2_pil], # Pass both images
+        image=[img1_pil, img2_pil],
         prompt=prompt,
         negative_prompt=negative_prompt,
         height=height,
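Seeding is the usual randomize-or-fix pattern around torch.Generator; a self-contained sketch (the MAX_SEED bound and parameter names are assumptions, not shown in this diff):

import random
import torch

MAX_SEED = 2**32 - 1  # assumed upper bound

def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # draw a fresh seed when requested, otherwise keep the user's value
    return random.randint(0, MAX_SEED) if randomize_seed else seed

generator = torch.Generator(device="cpu").manual_seed(resolve_seed(0, True))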
@@ -231,9 +206,8 @@
 
     return result, seed
 
-@spaces.GPU(duration=60)
+@spaces.GPU(duration=30)
 def infer_example(image_1, image_2, prompt, lora_adapter):
-    # Wrapper for examples that sets defaults
     if image_1 is None or image_2 is None:
         return None, 0
     result, seed = infer(
@@ -241,10 +215,10 @@ def infer_example(image_1, image_2, prompt, lora_adapter):
         image_2.convert("RGB"),
         prompt,
         lora_adapter,
-        0, # seed
-        True, # randomize
-        1.0, # guidance
-        4 # steps (Lightning optimized)
+        0,
+        True,
+        1.0,
+        4
     )
     return result, seed
 
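Both entry points now request 30 s of ZeroGPU time per call instead of 60 s. spaces.GPU attaches a GPU only for the duration of the decorated call; a minimal usage sketch (hypothetical function, real decorator):

import spaces
import torch

@spaces.GPU(duration=30)  # seconds of GPU time reserved per call
def gpu_smoke_test():
    # CUDA is available only while a @spaces.GPU function runs on ZeroGPU
    return torch.zeros(1, device="cuda").device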
@@ -261,7 +235,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
     gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast-Fusion**", elem_id="main-title")
     gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) model.")
     with gr.Row(equal_height=True):
-        # Left Column: Inputs
+
         with gr.Column(scale=1):
             with gr.Row():
                 image_1 = gr.Image(label="Base / Background / Body", type="pil", height=290)
@@ -301,7 +275,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
                 outputs=[output_image, seed],
                 fn=infer_example,
                 cache_examples=False,
-                label="Examples (Ensure files exist in 'examples/' folder)"
+                label="Examples"
             )
 
     run_button.click(
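The relabeled block is a standard gr.Examples wiring; written out in full it plausibly looks like this (a sketch: the example file paths are hypothetical, and the prompt and lora_adapter component names are assumed from infer_example's signature):

gr.Examples(
    examples=[
        ["examples/base.png", "examples/ref.png", "Swap the face.", "Face-Swap"],
    ],
    inputs=[image_1, image_2, prompt, lora_adapter],
    outputs=[output_image, seed],
    fn=infer_example,
    cache_examples=False,
    label="Examples",
)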
 