import gradio as gr
import torch
import random
import numpy as np
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline
import spaces

# ==============================
# Device (CPU ONLY)
# ==============================
# float32 on CPU: half precision is not reliably supported by CPU kernels.
device = "cpu"
dtype = torch.float32

print("Loading InstructPix2Pix pipeline...")

# safety_checker=None skips the NSFW filter model and saves RAM on the
# 16GB CPU Basic tier.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=dtype,
    safety_checker=None,
).to(device)

# CPU optimization: compute attention in slices to reduce peak memory.
pipe.enable_attention_slicing()

print("Model loaded successfully.")

MAX_SEED = np.iinfo(np.int32).max


# ==============================
# Inference Function
# ==============================
@spaces.GPU()  # Safe even on CPU Basic
def infer(
    image,
    prompt,
    seed=0,
    randomize_seed=True,
    guidance_scale=7.5,
    num_inference_steps=20,
):
    """Apply an InstructPix2Pix edit described by *prompt* to *image*.

    Args:
        image: PIL image to edit, or None (returns (None, seed) unchanged).
        prompt: Natural-language edit instruction.
        seed: RNG seed used when *randomize_seed* is False.
        randomize_seed: If True, draw a fresh seed in [0, MAX_SEED].
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Requested diffusion steps; hard-capped at 30
            below to keep CPU inference time bounded.

    Returns:
        Tuple of (edited PIL image or None, seed actually used) — the seed
        is returned so the UI can display the randomized value.
    """
    if image is None:
        return None, seed

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device=device).manual_seed(seed)

    # The model was trained at 512x512; resizing keeps CPU memory/time sane.
    image = image.convert("RGB").resize((512, 512))

    result = pipe(
        prompt=prompt,
        image=image,
        guidance_scale=guidance_scale,
        # Cap steps at 30 regardless of the slider to bound CPU runtime.
        num_inference_steps=min(num_inference_steps, 30),
        generator=generator,
    ).images[0]

    return result, seed


# ==============================
# UI
# ==============================
with gr.Blocks() as demo:
    gr.Markdown("# 🖼️ Image Edit (CPU Version)")
    gr.Markdown("Stable Diffusion InstructPix2Pix – works on 16GB CPU Basic")

    with gr.Row():
        input_image = gr.Image(type="pil", label="Input Image")
        output_image = gr.Image(type="pil", label="Edited Image")

    prompt = gr.Textbox(
        label="Edit Instruction",
        # NOTE: must stay a single-line literal; a raw newline inside the
        # string is a SyntaxError.
        placeholder="e.g. make the sky pink",
    )

    with gr.Row():
        seed = gr.Slider(0, MAX_SEED, value=0, step=1, label="Seed")
        randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")

    with gr.Row():
        guidance_scale = gr.Slider(
            1.0, 15.0, value=7.5, step=0.5, label="Guidance Scale"
        )
        # infer() caps effective steps at 30 even though the slider goes to 40.
        num_inference_steps = gr.Slider(1, 40, value=20, step=1, label="Steps")

    run_button = gr.Button("Edit Image")

    # Outputs include the seed slider so a randomized seed is shown back
    # to the user for reproducibility.
    run_button.click(
        fn=infer,
        inputs=[
            input_image,
            prompt,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[output_image, seed],
    )

if __name__ == "__main__":
    demo.launch()