Spaces: Runtime error
Upload 3 files
- app.py +33 -0
- model_utils.py +28 -0
- requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,33 @@
import gradio as gr
from model_utils import QwenImageEdit

# Load the model once at startup so every request reuses the same pipeline.
model = QwenImageEdit()

def infer(prompt, upload_image=None, steps=30, scale=7.5):
    # gr.Image(type="pil") delivers a PIL.Image directly (or None when the
    # user uploads nothing), so no manual file handling is needed.
    img = upload_image.convert("RGB") if upload_image is not None else None
    return model.generate(
        prompt=prompt,
        image=img,
        num_inference_steps=steps,
        guidance_scale=scale,
    )

title = "Qwen Image Edit — Text to Image / Image to Image"
description = "Enter a prompt; optionally upload an image to edit."

iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
        gr.Image(type="pil", label="Upload an image (optional)"),
        gr.Slider(minimum=10, maximum=100, step=1, value=30, label="Inference steps"),
        gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=7.5, label="Guidance scale"),
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title=title,
    description=description,
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
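Once the Space is up, the `infer` endpoint can also be exercised programmatically. A minimal sketch, assuming the app is running locally on port 7860 and the `gradio_client` package is installed; the prompt string is an illustrative placeholder:

from gradio_client import Client

client = Client("http://localhost:7860")
result = client.predict(
    "a watercolor painting of a lighthouse",  # prompt (illustrative)
    None,                                     # no input image -> text-to-image
    30,                                       # inference steps
    7.5,                                      # guidance scale
    api_name="/predict",                      # default endpoint for gr.Interface
)
print(result)  # path to the generated image file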
model_utils.py ADDED
@@ -0,0 +1,28 @@
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
import torch

class QwenImageEdit:
    def __init__(self, model_name="Phr00t/Qwen-Image-Edit-Rapid-AIO"):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        # fp16 halves GPU memory; fall back to fp32 on CPU, where fp16 is
        # poorly supported. (The deprecated revision="fp16" argument is gone.)
        dtype = torch.float16 if device == "cuda" else torch.float32
        self.pipe = StableDiffusionPipeline.from_pretrained(
            model_name,
            torch_dtype=dtype,
        ).to(device)
        # A text-to-image pipeline does not accept an input image; build an
        # img2img pipeline that shares the already-loaded components.
        self.img2img = StableDiffusionImg2ImgPipeline(**self.pipe.components)

    def generate(self, prompt: str, image=None, num_inference_steps=30, guidance_scale=7.5):
        # If an image is provided → image-to-image editing, otherwise vanilla text→image
        if image is not None:
            out = self.img2img(
                prompt=prompt,
                image=image,  # diffusers renamed `init_image` to `image`
                strength=0.7,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
            )
        else:
            out = self.pipe(
                prompt=prompt,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
            )
        return out.images[0]
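The wrapper class can also be used outside Gradio. A minimal sketch of both code paths, assuming the checkpoint downloads and loads successfully; the file names are placeholders:

from PIL import Image
from model_utils import QwenImageEdit

model = QwenImageEdit()

# Text-to-image: no input image supplied.
img = model.generate(prompt="a foggy mountain pass at dawn")
img.save("txt2img.png")

# Image-to-image: edit an existing picture ("photo.jpg" is a placeholder path).
src = Image.open("photo.jpg").convert("RGB")
edited = model.generate(prompt="make it look like winter", image=src)
edited.save("img2img.png")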
requirements.txt ADDED
@@ -0,0 +1,6 @@
gradio
diffusers
transformers
torch
accelerate
safetensors
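Note that these requirements are unpinned, so a rebuild can silently pick up breaking upstream releases, which is one plausible source of a Space runtime error. A sketch of a pinned variant; the version numbers below are illustrative placeholders, not tested values:

# illustrative pins - replace with the versions you actually tested
gradio==4.44.0
diffusers==0.31.0
transformers==4.46.0
torch==2.4.0
accelerate==1.0.0
safetensors==0.4.5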