import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Stable Diffusion checkpoint to load. Replace with your model if different.
model_id = "runwayml/stable-diffusion-v1-5"

# Load the pipeline on CPU. float32 is used because half precision is not
# generally supported for CPU inference; attention slicing trades a little
# speed for a much lower peak-memory footprint.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe = pipe.to("cpu")
pipe.enable_attention_slicing()  # Reduce memory usage on CPU


def _resolve_seed(seed):
    """Normalize a user-supplied seed into an int.

    Accepts ``None``, an int, or a string. Blank/whitespace-only or
    non-numeric input (e.g. the user typed "abc") falls back to a random
    seed so generation never fails on bad input.
    """
    if seed is None or str(seed).strip() == "":
        return torch.randint(0, 1_000_000, (1,)).item()
    try:
        # strip() so entries like " 42 " parse instead of randomizing.
        return int(str(seed).strip())
    except ValueError:
        return torch.randint(0, 1_000_000, (1,)).item()


def generate_image(prompt, seed=None):
    """Generate one image from *prompt*, optionally with a fixed seed.

    Args:
        prompt: Text prompt for the diffusion model.
        seed: Optional seed (int or numeric string). Blank or invalid
            values are replaced by a random seed.

    Returns:
        tuple: ``(image, seed_str)`` — the generated PIL image and the
        seed actually used, as a string so Gradio can show it in a Textbox.
    """
    seed = _resolve_seed(seed)
    # Seed a CPU generator so the same (prompt, seed) pair reproduces
    # the same image.
    generator = torch.Generator(device="cpu").manual_seed(seed)
    # 20 inference steps keeps CPU run time tolerable; raise for quality.
    image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
    return image, str(seed)


# Gradio UI: prompt + optional seed in; image + echoed seed out.
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here"),
        gr.Textbox(label="Seed (optional)", placeholder="Leave blank for random"),
    ],
    outputs=[
        gr.Image(label="Generated Image"),
        gr.Textbox(label="Seed Used"),
    ],
    title="Stable Diffusion on CPU with Random Seed",
    description="Generate images with Stable Diffusion on CPU. Leave seed blank for random output.",
)

# Guard the launch so importing this module (e.g. from tests or another
# script) does not start the web server as a side effect.
if __name__ == "__main__":
    interface.launch()