Update app.py
app.py CHANGED
@@ -165,7 +165,7 @@ def update_dimensions_on_upload(image):
 
     return new_width, new_height
 
-@spaces.GPU
+@spaces.GPU(duration=15)
 def infer(
     images,
     prompt,
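This hunk and the next swap the bare @spaces.GPU decorator for @spaces.GPU(duration=15). A minimal sketch of how that is typically used on a ZeroGPU Space (the function body below is a placeholder, not this Space's actual code):

    import spaces

    @spaces.GPU(duration=15)  # cap the GPU allocation at ~15 s instead of the default 60 s
    def infer(images, prompt):
        ...  # runs with CUDA available; a shorter duration can reduce queue wait
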
@@ -254,7 +254,7 @@ def infer(
     gc.collect()
     torch.cuda.empty_cache()
 
-@spaces.GPU
+@spaces.GPU(duration=15)
 def infer_example(images, prompt, lora_adapter):
     if not images:
         return None, 0
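The gc.collect() / torch.cuda.empty_cache() pair in the context lines above is the usual post-inference cleanup. A sketch, assuming the pipeline outputs have already been dereferenced:

    import gc
    import torch

    def free_gpu_memory():
        gc.collect()               # drop unreachable Python objects still holding tensors
        torch.cuda.empty_cache()   # release cached CUDA blocks back to the driver
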
@@ -285,9 +285,7 @@ css="""
 
 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("
-        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters. Upload one or more images.")
-
+        gr.Markdown("Qedit")
         with gr.Row(equal_height=True):
             with gr.Column():
                 images = gr.Gallery(
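For context, the layout this hunk implies is roughly the following sketch (the gr.Gallery keyword argument is illustrative only; the diff cuts off after the opening paren):

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Column(elem_id="col-container"):
            gr.Markdown("Qedit")
            with gr.Row(equal_height=True):
                with gr.Column():
                    # multi-image input component; label is an assumption
                    images = gr.Gallery(label="Input Images")
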
@@ -323,24 +321,7 @@ with gr.Blocks() as demo:
                 guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                 steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
 
-
-        examples=[
-            [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime"],
-            [["examples/A.jpeg"], "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
-            [["examples/U.jpg"], "Upscale this picture to 4K resolution.", "Upscaler"],
-            [["examples/ST1.jpg", "examples/ST2.jpg"], "Convert Image 1 to the style of Image 2.", "Style-Transfer"],
-            [["examples/L1.jpg", "examples/L2.jpg"], "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration"],
-            [["examples/P1.jpg", "examples/P2.jpg"], "Make the person in image 1 do the exact same pose of the person in image 2. Changing the style and background of the image of the person in image 1 is undesirable, so don't do it.", "Any-Pose"],
-        ],
-        inputs=[images, prompt, lora_adapter],
-        outputs=[output_image, seed],
-        fn=infer_example,
-        cache_examples=False,
-        label="Examples"
-        )
-
-        gr.Markdown("[*](https://huggingface.co/spaces/prithivMLmods/Qwen-Image-Edit-2511-LoRAs-Fast)This is still an experimental Space for Qwen-Image-Edit-2511.")
-
+
         run_button.click(
             fn=infer,
             inputs=[images, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
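The removed lines read like the keyword arguments of a gr.Examples(...) call whose opening line the diff view truncates. A hedged reconstruction, with a single example row shown:

    gr.Examples(
        examples=[
            [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime"],
        ],
        inputs=[images, prompt, lora_adapter],
        outputs=[output_image, seed],
        fn=infer_example,
        cache_examples=False,
        label="Examples",
    )

With cache_examples=False, clicking an example calls infer_example live rather than serving precomputed outputs, which is why that function also carries the @spaces.GPU decorator.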
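The diff ends mid-call. The usual completion of this event wiring would look like the sketch below; the outputs list is an assumption, mirroring the outputs used by the removed examples block:

    run_button.click(
        fn=infer,
        inputs=[images, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_image, seed],  # assumed: matches the examples block above
    )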