import gradio as gr
import torch
import random
import numpy as np
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline
import spaces
# ==============================
# Device (CPU ONLY)
# ==============================
# This Space targets the free CPU Basic tier: no CUDA, full-precision weights.
device = "cpu"
dtype = torch.float32  # float16 is not reliable on CPU; keep fp32
print("Loading InstructPix2Pix pipeline...")
# Download/load the instruction-following image-editing pipeline.
# safety_checker=None skips the NSFW filter model (saves memory/load time).
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=dtype,
    safety_checker=None
).to(device)
# CPU optimizations: attention slicing trades a little speed for lower peak RAM.
pipe.enable_attention_slicing()
print("Model loaded successfully.")
# Upper bound for user-visible seeds (fits in a 32-bit signed int).
MAX_SEED = np.iinfo(np.int32).max
# ==============================
# Inference Function
# ==============================
@spaces.GPU() # Safe even on CPU Basic
def infer(
    image,
    prompt,
    seed=0,
    randomize_seed=True,
    guidance_scale=7.5,
    num_inference_steps=20,
):
    """Edit *image* according to the text *prompt* with InstructPix2Pix.

    Returns a ``(result_image, seed)`` pair; when *image* is ``None`` the
    result is ``(None, seed)``. The seed is drawn fresh when
    *randomize_seed* is true, so it is returned for reproducibility.
    """
    if image is None:
        return None, seed

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    rng = torch.Generator(device=device).manual_seed(seed)

    # Normalize input: RGB mode at a fixed 512x512 working resolution.
    prepared = image.convert("RGB").resize((512, 512))

    # Steps are capped at 30 to keep CPU latency tolerable regardless of
    # what the UI slider allows.
    output = pipe(
        prompt=prompt,
        image=prepared,
        guidance_scale=guidance_scale,
        num_inference_steps=min(num_inference_steps, 30),
        generator=rng,
    )
    return output.images[0], seed
# ==============================
# UI
# ==============================
# Gradio UI: input/output images side by side, a text instruction,
# seed controls, sampler controls, and a single run button.
with gr.Blocks() as demo:
    gr.Markdown("# 🖼️ Image Edit (CPU Version)")
    gr.Markdown("Stable Diffusion InstructPix2Pix – works on 16GB CPU Basic")
    with gr.Row():
        input_image = gr.Image(type="pil", label="Input Image")
        output_image = gr.Image(type="pil", label="Edited Image")
    prompt = gr.Textbox(
        label="Edit Instruction",
        placeholder="e.g. make the sky pink"
    )
    with gr.Row():
        # Seed slider is ignored when "Randomize Seed" is checked
        # (infer draws a fresh seed and echoes it back to this slider).
        seed = gr.Slider(0, MAX_SEED, value=0, step=1, label="Seed")
        randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
    with gr.Row():
        guidance_scale = gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance Scale")
        # NOTE: UI allows up to 40 steps, but infer clamps to 30 on CPU.
        num_inference_steps = gr.Slider(1, 40, value=20, step=1, label="Steps")
    run_button = gr.Button("Edit Image")
    # Wire the button: inputs mirror infer's positional parameters;
    # outputs are (edited image, seed actually used).
    run_button.click(
        fn=infer,
        inputs=[
            input_image,
            prompt,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps
        ],
        outputs=[output_image, seed],
    )
# Script entry point: launch the Gradio server when run directly.
# (Removed a trailing " |" scrape artifact that made this line a syntax error.)
if __name__ == "__main__":
    demo.launch()