prithivMLmods committed on
Commit
e1750df
·
verified ·
1 Parent(s): f8d9f42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -16
app.py CHANGED
@@ -222,13 +222,18 @@ def infer(
222
  # Handle different Rerun SDK versions robustly
223
  rec = None
224
  if hasattr(rr, "new_recording"):
 
225
  rec = rr.new_recording(application_id="Qwen-Image-Edit", recording_id=run_id)
226
  elif hasattr(rr, "RecordingStream"):
 
227
  rec = rr.RecordingStream(application_id="Qwen-Image-Edit", recording_id=run_id)
228
  else:
 
229
  rr.init("Qwen-Image-Edit", recording_id=run_id, spawn=False)
230
  rec = rr
231
 
 
 
232
  rec.log("images/original", rr.Image(np.array(original_image)))
233
  rec.log("images/edited", rr.Image(np.array(result_image)))
234
 
@@ -236,11 +241,7 @@ def infer(
236
  rrd_path = os.path.join(TMP_DIR, f"{run_id}.rrd")
237
  rec.save(rrd_path)
238
 
239
- # Save Result Image for Download Box
240
- image_path = os.path.join(TMP_DIR, f"{run_id}.png")
241
- result_image.save(image_path)
242
-
243
- return rrd_path, seed, image_path
244
 
245
  except Exception as e:
246
  raise e
@@ -251,14 +252,14 @@ def infer(
251
  @spaces.GPU
252
  def infer_example(input_image, prompt, lora_adapter):
253
  if input_image is None:
254
- return None, 0, None
255
 
256
  input_pil = input_image.convert("RGB")
257
  guidance_scale = 1.0
258
  steps = 4
259
-
260
- result_rrd, seed, image_path = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
261
- return result_rrd, seed, image_path
262
 
263
  css="""
264
  #col-container {
@@ -273,9 +274,9 @@ with gr.Blocks() as demo:
273
  gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
274
  gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) model.")
275
 
276
- with gr.Row(equal_height=True):
277
  with gr.Column():
278
- input_image = gr.Image(label="Upload Image", type="pil", height=295)
279
 
280
  prompt = gr.Text(
281
  label="Edit Prompt",
@@ -291,8 +292,6 @@ with gr.Blocks() as demo:
291
  label="Rerun Visualization",
292
  height=353
293
  )
294
-
295
- result_file = gr.File(label="Download Edited Image")
296
 
297
  with gr.Row():
298
  lora_adapter = gr.Dropdown(
@@ -300,7 +299,6 @@ with gr.Blocks() as demo:
300
  choices=list(ADAPTER_SPECS.keys()),
301
  value="Photo-to-Anime"
302
  )
303
-
304
  with gr.Accordion("Advanced Settings", open=False, visible=False):
305
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
306
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
@@ -313,7 +311,7 @@ with gr.Blocks() as demo:
313
  ["examples/A.jpeg", "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
314
  ],
315
  inputs=[input_image, prompt, lora_adapter],
316
- outputs=[rerun_output, seed, result_file],
317
  fn=infer_example,
318
  cache_examples=False,
319
  label="Examples"
@@ -324,7 +322,7 @@ with gr.Blocks() as demo:
324
  run_button.click(
325
  fn=infer,
326
  inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
327
- outputs=[rerun_output, seed, result_file]
328
  )
329
 
330
  if __name__ == "__main__":
 
222
  # Handle different Rerun SDK versions robustly
223
  rec = None
224
  if hasattr(rr, "new_recording"):
225
+ # Newer Rerun versions
226
  rec = rr.new_recording(application_id="Qwen-Image-Edit", recording_id=run_id)
227
  elif hasattr(rr, "RecordingStream"):
228
+ # Alternative direct class instantiation
229
  rec = rr.RecordingStream(application_id="Qwen-Image-Edit", recording_id=run_id)
230
  else:
231
+ # Fallback for older versions or simple scripts (Global State)
232
  rr.init("Qwen-Image-Edit", recording_id=run_id, spawn=False)
233
  rec = rr
234
 
235
+ # Log images to Rerun
236
+ # rec.log handles logging for both RecordingStream objects and the global rr module
237
  rec.log("images/original", rr.Image(np.array(original_image)))
238
  rec.log("images/edited", rr.Image(np.array(result_image)))
239
 
 
241
  rrd_path = os.path.join(TMP_DIR, f"{run_id}.rrd")
242
  rec.save(rrd_path)
243
 
244
+ return rrd_path, seed
 
 
 
 
245
 
246
  except Exception as e:
247
  raise e
 
252
  @spaces.GPU
253
  def infer_example(input_image, prompt, lora_adapter):
254
  if input_image is None:
255
+ return None, 0
256
 
257
  input_pil = input_image.convert("RGB")
258
  guidance_scale = 1.0
259
  steps = 4
260
+ # Call main infer but ignore progress for examples if needed
261
+ result_rrd, seed = infer(input_pil, prompt, lora_adapter, 0, True, guidance_scale, steps)
262
+ return result_rrd, seed
263
 
264
  css="""
265
  #col-container {
 
274
  gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
275
  gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters for the [Qwen-Image-Edit](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) model.")
276
 
277
+ with gr.Row():
278
  with gr.Column():
279
+ input_image = gr.Image(label="Upload Image", type="pil", height=290)
280
 
281
  prompt = gr.Text(
282
  label="Edit Prompt",
 
292
  label="Rerun Visualization",
293
  height=353
294
  )
 
 
295
 
296
  with gr.Row():
297
  lora_adapter = gr.Dropdown(
 
299
  choices=list(ADAPTER_SPECS.keys()),
300
  value="Photo-to-Anime"
301
  )
 
302
  with gr.Accordion("Advanced Settings", open=False, visible=False):
303
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
304
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
 
311
  ["examples/A.jpeg", "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
312
  ],
313
  inputs=[input_image, prompt, lora_adapter],
314
+ outputs=[rerun_output, seed],
315
  fn=infer_example,
316
  cache_examples=False,
317
  label="Examples"
 
322
  run_button.click(
323
  fn=infer,
324
  inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
325
+ outputs=[rerun_output, seed]
326
  )
327
 
328
  if __name__ == "__main__":