prithivMLmods committed
Commit 124d2a8 · verified · 1 Parent(s): 5e7e96d

Update app.py

Files changed (1)
  1. app.py +30 -71
app.py CHANGED
@@ -69,7 +69,7 @@ steel_blue_theme = SteelBlueTheme()
 
 # --- Model Loading ---
 from diffusers import FlowMatchEulerDiscreteScheduler
-from optimization import optimize_pipeline_  # Assuming this is a custom file
+# from optimization import optimize_pipeline_  # Assuming this is a custom file, if not available, comment out the call
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
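Review note: commenting the import out only works because its call site (optimize_pipeline_(pipe, ...)) is also removed in the next hunk. A guarded import with a no-op fallback would keep the ahead-of-time optimization when optimization.py is present and still run without it; a minimal sketch, where the fallback body is an assumption and not part of this commit:

try:
    from optimization import optimize_pipeline_
except ImportError:
    def optimize_pipeline_(pipe, *args, **kwargs):
        # No-op fallback: skip ahead-of-time optimization when the
        # custom optimization module is unavailable.
        return pipe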
@@ -102,42 +102,10 @@ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
                        weight_name="Qwen-Edit-Relight.safetensors",
                        adapter_name="relight")
 
-pipe.transformer.__class__ = QwenImageTransformer2DModel
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+MAX_SEED = np.iinfo(np.int32).max
 
-optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
-
-# --- Helper Function for Aspect Ratio (Corrected) ---
-@torch.no_grad()
-def update_dimensions_on_upload(image):
-    # *** FIX: This function now correctly preserves aspect ratio for all image sizes. ***
-    if image is None:
-        return 1024, 1024  # Default for no image
-
-    original_width, original_height = image.size
-    max_dim = 1024
-
-    if original_width > max_dim or original_height > max_dim:
-        # If the image is larger than the max dimension, scale it down
-        if original_width > original_height:
-            new_width = max_dim
-            new_height = int(max_dim * original_height / original_width)
-        else:
-            new_height = max_dim
-            new_width = int(max_dim * original_width / original_height)
-    else:
-        # If the image is smaller, use its original dimensions
-        new_width = original_width
-        new_height = original_height
-
-    # Ensure final dimensions are multiples of 8 for model compatibility
-    final_width = (new_width // 8) * 8
-    final_height = (new_height // 8) * 8
-
-    return final_width, final_height
-
-
-# --- Main Inference Function ---
+# --- Main Inference Function (Corrected) ---
 @spaces.GPU
 def infer(
     input_image,
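Review note: MAX_SEED = np.iinfo(np.int32).max assumes numpy is already imported as np in app.py. If it is not, the same value needs no dependency:

MAX_SEED = 2**31 - 1  # identical to np.iinfo(np.int32).max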
@@ -147,8 +115,6 @@ def infer(
     randomize_seed,
     guidance_scale,
     steps,
-    width,
-    height,
     progress=gr.Progress(track_tqdm=True)
 ):
     if input_image is None:
@@ -163,16 +129,19 @@ def infer(
         pipe.set_adapters(["light-restoration"], adapter_weights=[1.0])
     elif lora_adapter == "Relight":
         pipe.set_adapters(["relight"], adapter_weights=[1.0])
-
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
+
     generator = torch.Generator(device=device).manual_seed(seed)
-
     negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
-
+
+    # *** FIX: Get dimensions directly from the input image to preserve aspect ratio ***
+    original_image = input_image.convert("RGB")
+    width, height = original_image.size
+
     result = pipe(
-        image=input_image.convert("RGB"),
+        image=original_image,
         prompt=prompt,
         negative_prompt=negative_prompt,
         height=height,
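Review note: the corrected code feeds the upload's raw width and height to the pipeline, whereas the deleted helper capped the longer side at 1024 and rounded to multiples of 8. Latent-diffusion pipelines typically expect dimensions divisible by the VAE scale factor, so odd-sized uploads may still fail; a minimal snapping sketch (the helper name and defaults are assumptions, not part of this commit):

def snap_dimensions(width: int, height: int, max_dim: int = 1024, multiple: int = 8):
    # Downscale (never upscale) so the longer side fits max_dim,
    # then round each side down to the nearest multiple.
    scale = min(max_dim / width, max_dim / height, 1.0)
    w = max(int(width * scale) // multiple * multiple, multiple)
    h = max(int(height * scale) // multiple * multiple, multiple)
    return w, h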
@@ -180,22 +149,19 @@
         num_inference_steps=steps,
         generator=generator,
         true_cfg_scale=guidance_scale,
-        num_images_per_prompt=1,
     ).images[0]
 
     return result, seed
 
-# --- Wrapper for Examples ---
+# --- Wrapper for Examples (Corrected) ---
 @spaces.GPU
 def infer_example(input_image, prompt, lora_adapter):
     input_pil = input_image.convert("RGB")
-    # Calculate correct aspect ratio for the example image using the corrected function
-    width, height = update_dimensions_on_upload(input_pil)
     # Set reasonable default values for example inference
-    guidance_scale = 1.0
-    steps = 4
+    guidance_scale = 4.0
+    steps = 25
     # Call the main infer function
-    result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps, width, height)
+    result, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
     return result, seed
 
 # --- UI Layout ---
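Review note: the wrapper calls infer() positionally with randomize_seed=True, so example outputs vary from run to run. Pinning the seed would make examples reproducible; a one-line sketch against the new seven-parameter signature:

result, seed = infer(input_pil, prompt, lora_adapter, 0, False, guidance_scale, steps)  # seed=0, randomize_seed=False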
@@ -211,36 +177,33 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast**", elem_id="main-title")
         gr.Markdown("Perform diverse image edits using specialized LoRA adapters for the Qwen-Image-Edit model.")
-
+
         with gr.Row(equal_height=True):
             with gr.Column():
-                input_image = gr.Image(label="Upload Image", type="pil")
-
+                input_image = gr.Image(label="Upload Image", type="pil", height=400)
+
                 lora_adapter = gr.Dropdown(
                     label="Choose Editing Style",
                     choices=["Photo-to-Anime", "Multiple-Angles", "Light-Restoration", "Relight"],
                     value="Photo-to-Anime"
                 )
-
+
                 prompt = gr.Text(
                     label="Edit Prompt",
                     show_label=True,
                     placeholder="e.g., transform into anime",
                 )
-
+
                 run_button = gr.Button("Run", variant="primary")
-
+
                 with gr.Accordion("⚙️ Advanced Settings", open=False):
                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                     randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
-                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
-                    # Hidden sliders to hold image dimensions
-                    height = gr.Slider(label="Height", minimum=256, maximum=1024, step=8, value=1024, visible=False)
-                    width = gr.Slider(label="Width", minimum=256, maximum=1024, step=8, value=1024, visible=False)
-
+                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=4.0)
+                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=25)
+
             with gr.Column():
-                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=290)
+                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=400)
 
         gr.Examples(
             examples=[
@@ -259,21 +222,17 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
             inputs=[input_image, prompt, lora_adapter],
             outputs=[output_image, seed],
             fn=infer_example,
-            cache_examples=False,
+            cache_examples="lazy",
             label="Examples"
         )
-
-        # --- Event Handlers ---
+
+        # --- Event Handlers (Corrected) ---
         run_button.click(
             fn=infer,
-            inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, width, height],
+            inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
             outputs=[output_image, seed]
         )
 
-        input_image.upload(
-            fn=update_dimensions_on_upload,
-            inputs=[input_image],
-            outputs=[width, height]
-        )
+        # No longer need the upload handler for dimensions
 
 demo.launch()
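Review note: cache_examples="lazy" computes and caches each example the first time a visitor runs it, rather than precomputing everything at startup, which keeps Space boot time short. With the width/height sliders gone, calling the new infer() directly takes seven arguments; a hypothetical usage sketch (input.png is a placeholder path, not part of this commit):

from PIL import Image

image = Image.open("input.png")
result, used_seed = infer(
    image, "transform into anime", "Photo-to-Anime",
    seed=0, randomize_seed=True, guidance_scale=4.0, steps=25,
)
result.save("output.png")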
 