Update app.py
app.py CHANGED
@@ -1,93 +1,53 @@
-# app.py
-# Prompt Image Editor — Hugging Face Space
-# Minimal branding in source so the repo can be published under a subsidiary page
-
-
 import os
-import gradio as gr
-from PIL import Image
 import torch
-
-from
-
-
-logging.set_verbosity_error()
-
 
-
-MODEL_ID = os.getenv("MODEL_ID", "runwayml/stable-diffusion-v1-5")
-HF_TOKEN = os.getenv("HF_API_TOKEN")  # set as a Secret in your Space if required
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
-
-
-
-
-
-
-
-            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32
-
         )
-    else:
-        pipe = StableDiffusionPipeline.from_pretrained(
-            MODEL_ID,
-            revision="fp16",
-            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
-            use_auth_token=HF_TOKEN if HF_TOKEN else None,
-        )
-    if DEVICE == "cuda":
-        pipe = pipe.to("cuda")
-    return pipe
-
-
-pipe = load_pipelines()
-
-
-def generate_image(prompt: str, negative_prompt: str, steps: int, guidance: float):
-    if not prompt:
-        return None
-    with torch.autocast("cuda") if DEVICE == "cuda" else torch.no_grad():
-        out = pipe(prompt=prompt, guidance_scale=guidance, num_inference_steps=steps)
-    return out.images[0]
-
-
-
-
-def edit_image(init_image, mask, prompt: str, negative_prompt: str, steps: int, guidance: float):
-    if init_image is None:
-        return None
-    if mask is None:
-        return None
-    init_img = init_image.convert("RGB")
-    mask_img = mask.convert("L")
-    with torch.autocast("cuda") if DEVICE == "cuda" else torch.no_grad():
-        out = pipe(prompt=prompt, image=init_img, mask_image=mask_img, guidance_scale=guidance, num_inference_steps=steps)
-    return out.images[0]
-
-
-with gr.Blocks(title="Prompt Image Editor") as demo:
-    gr.Markdown("# Prompt Image Editor")
-    with gr.Row():
-        with gr.Column(scale=2):
-            mode = gr.Radio(["Generate", "Edit / Inpaint"], value="Generate", label="Mode")
-            prompt = gr.Textbox(lines=3, label="Prompt")
-            negative_prompt = gr.Textbox(lines=2, label="Negative prompt (optional)")
-            steps = gr.Slider(minimum=10, maximum=60, step=5, value=28, label="Steps")
-            guidance = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=7.5, label="Guidance Scale")
-            run = gr.Button("Run")
-        with gr.Column(scale=3):
-            input_image = gr.Image(type="pil", label="Initial image (for editing)")
-            mask_image = gr.Image(type="pil", label="Mask (white = edit)")
-            output = gr.Image(label="Output")
-
 
-
-
-        if mode == "Generate":
-            return generate_image(prompt, negative_prompt, steps, guidance)
-        else:
-            return edit_image(input_image, mask_image, prompt, negative_prompt, steps, guidance)
-    except Exception as e:
-        return Image.new('RGB', (512,512), color=(255,0,0))
-demo.launch()
 import os
 import torch
+import gradio as gr
+from diffusers import StableDiffusionPipeline
 
+MODEL_ID = os.getenv("MODEL_ID", "stabilityai/stable-diffusion-2-1")
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
+# -------------------------
+# Load Model
+# -------------------------
+def load_pipeline():
+    print(f"Loading model: {MODEL_ID} on {DEVICE}")
+    pipe = StableDiffusionPipeline.from_pretrained(
+        MODEL_ID,
+        torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32
+    )
+    pipe = pipe.to(DEVICE)
+    return pipe
+
+pipe = load_pipeline()
+
+# -------------------------
+# Inference Function
+# -------------------------
+def generate(prompt):
+    if not prompt or prompt.strip() == "":
+        return "Please enter a valid prompt.", None
+
+    print("Running inference...")
+
+    result = pipe(
+        prompt=prompt,
+        num_inference_steps=25,
+        guidance_scale=7.5
+    )
+
+    image = result.images[0]
+    return f"Generated image for: {prompt}", image
+
+# -------------------------
+# Gradio UI
+# -------------------------
+interface = gr.Interface(
+    fn=generate,
+    inputs=gr.Textbox(label="Prompt", placeholder="Enter your image prompt..."),
+    outputs=[gr.Textbox(label="Status"), gr.Image(label="Generated Image")],
+    title="Prompt Image Editor",
+    description="Generate AI images using text prompts.",
 )
 
+if __name__ == "__main__":
+    interface.launch()
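For reference, a minimal sketch of calling the updated app remotely once the Space is running, using the gradio_client package. The Space id below is a placeholder (the owner/space name is not given on this page), the prompt is an arbitrary example, and "/predict" is simply the default endpoint name that a plain gr.Interface exposes; the two return values mirror the Status textbox and the generated image, which the client downloads to a local file.

# Sketch: query the running Space from Python; the Space id is hypothetical.
from gradio_client import Client

client = Client("<owner>/<space-name>")  # replace with the real Space id
status, image_path = client.predict(
    "a watercolor lighthouse at dusk",   # example prompt (arbitrary)
    api_name="/predict",                 # default endpoint for gr.Interface
)
print(status)      # e.g. "Generated image for: ..."
print(image_path)  # local path of the downloaded image file

Running app.py directly (python app.py) and opening the printed URL exercises the same generate function through the web UI, since the script ends with interface.launch().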