# app.py
# Prompt Image Editor — Hugging Face Space
# Minimal branding in source so the repo can be published under a subsidiary page
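
# A plausible requirements.txt for this Space (package names inferred from the
# imports below; the diffusers pin is an assumption, not taken from the repo):
#
#   torch
#   diffusers>=0.27
#   transformers
#   accelerate
#   gradio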

import os

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline
from transformers import logging

# Keep transformers' progress chatter out of the Space logs
logging.set_verbosity_error()

# Environment settings (Spaces: Variables & Secrets)
MODEL_ID = os.getenv("MODEL_ID", "runwayml/stable-diffusion-v1-5")
HF_TOKEN = os.getenv("HF_API_TOKEN")  # set as a Secret in your Space if the model is gated
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
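
# Example: setting MODEL_ID=stabilityai/stable-diffusion-2-inpainting (a public
# inpainting checkpoint) in the Space's Variables routes load_pipelines() below
# through its inpainting branch, which Edit / Inpaint mode requires.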

def load_pipelines():
    print(f"Loading model: {MODEL_ID} on {DEVICE}")
    dtype = torch.float16 if DEVICE == "cuda" else torch.float32
    # `variant` replaces the deprecated `revision="fp16"` convention; it is only
    # requested on GPU and assumes the checkpoint publishes fp16 weight files,
    # as the default model does.
    variant = "fp16" if DEVICE == "cuda" else None
    # Pick the inpainting pipeline when the model id suggests one; Edit mode
    # only works with an inpainting-capable checkpoint.
    if "inpaint" in MODEL_ID or "img2img" in MODEL_ID:
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            MODEL_ID,
            torch_dtype=dtype,
            variant=variant,
            token=HF_TOKEN,  # `token` replaces the deprecated `use_auth_token`
        )
    else:
        pipe = StableDiffusionPipeline.from_pretrained(
            MODEL_ID,
            torch_dtype=dtype,
            variant=variant,
            token=HF_TOKEN,
        )
    return pipe.to(DEVICE)

pipe = load_pipelines()
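# Optional, left disabled here: attention slicing (a diffusers built-in) trades
# some speed for a lower peak-VRAM footprint on small GPUs.
# pipe.enable_attention_slicing()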

def generate_image(prompt: str, negative_prompt: str, steps: int, guidance: float):
    if not prompt:
        return None
    ctx = torch.autocast("cuda") if DEVICE == "cuda" else torch.no_grad()
    with ctx:
        # Forward the optional negative prompt; Gradio sliders deliver floats,
        # so cast the step count to int for the scheduler.
        out = pipe(prompt=prompt, negative_prompt=negative_prompt or None,
                   guidance_scale=guidance, num_inference_steps=int(steps))
    return out.images[0]

def edit_image(init_image, mask, prompt: str, negative_prompt: str, steps: int, guidance: float):
    if init_image is None or mask is None:
        return None
    if not isinstance(pipe, StableDiffusionInpaintPipeline):
        raise gr.Error("Edit mode needs an inpainting model; point MODEL_ID at an inpainting checkpoint.")
    init_img = init_image.convert("RGB")
    mask_img = mask.convert("L")  # single-channel mask: white marks the region to repaint
    ctx = torch.autocast("cuda") if DEVICE == "cuda" else torch.no_grad()
    with ctx:
        out = pipe(prompt=prompt, negative_prompt=negative_prompt or None,
                   image=init_img, mask_image=mask_img,
                   guidance_scale=guidance, num_inference_steps=int(steps))
    return out.images[0]

with gr.Blocks(title="Prompt Image Editor") as demo:
    gr.Markdown("# Prompt Image Editor")
    with gr.Row():
        with gr.Column(scale=2):
            mode = gr.Radio(["Generate", "Edit / Inpaint"], value="Generate", label="Mode")
            prompt = gr.Textbox(lines=3, label="Prompt")
            negative_prompt = gr.Textbox(lines=2, label="Negative prompt (optional)")
            steps = gr.Slider(minimum=10, maximum=60, step=5, value=28, label="Steps")
            guidance = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=7.5, label="Guidance Scale")
            run = gr.Button("Run")
        with gr.Column(scale=3):
            input_image = gr.Image(type="pil", label="Initial image (for editing)")
            mask_image = gr.Image(type="pil", label="Mask (white = edit)")
            output = gr.Image(label="Output")

    def _run(mode, prompt, negative_prompt, steps, guidance, input_image, mask_image):
        try:
            if mode == "Generate":
                return generate_image(prompt, negative_prompt, steps, guidance)
            return edit_image(input_image, mask_image, prompt, negative_prompt, steps, guidance)
        except Exception as e:
            # Surface failures in the UI as a Gradio error modal
            raise gr.Error(str(e))

    # Wire the Run button to the handler
    run.click(_run,
              inputs=[mode, prompt, negative_prompt, steps, guidance, input_image, mask_image],
              outputs=output)

demo.launch()