import torch
from diffusers import StableDiffusionPipeline
import gradio as gr
# Load the Stable Diffusion checkpoint and prepare it for CPU-only inference.
model_id = "runwayml/stable-diffusion-v1-5"  # Replace with your model if different
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, torch_dtype=torch.float32
).to("cpu")
# Attention slicing trades a little speed for a much lower peak memory use,
# which matters when running without a GPU.
pipe.enable_attention_slicing()
# Define the generation function
def generate_image(prompt, seed=None):
    """Run the Stable Diffusion pipeline on *prompt* and return the result.

    Parameters
    ----------
    prompt : str
        Text prompt forwarded to the pipeline.
    seed : str | int | None, optional
        Seed for reproducible output. A blank or non-numeric value is
        silently replaced by a random seed so the UI never crashes on
        bad input.

    Returns
    -------
    tuple
        (generated image, seed actually used as a string for display).
    """
    # Resolve the seed: int(None) raises TypeError, int("") / int("abc")
    # raise ValueError — every unusable input falls back to a random seed.
    try:
        resolved_seed = int(seed)
    except (TypeError, ValueError):
        resolved_seed = torch.randint(0, 1000000, (1,)).item()
    # A CPU-bound generator seeded explicitly makes the run reproducible.
    generator = torch.Generator(device="cpu").manual_seed(resolved_seed)
    # 20 inference steps keeps CPU generation time tolerable.
    image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
    # The seed is returned as a string so Gradio can show it in a textbox.
    return image, str(resolved_seed)
# Create Gradio interface
# Assemble the Gradio UI: prompt + optional seed in, image + seed used out.
prompt_box = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
seed_box = gr.Textbox(label="Seed (optional)", placeholder="Leave blank for random")
interface = gr.Interface(
    fn=generate_image,
    inputs=[prompt_box, seed_box],
    outputs=[
        gr.Image(label="Generated Image"),
        gr.Textbox(label="Seed Used"),
    ],
    title="Stable Diffusion on CPU with Random Seed",
    description="Generate images with Stable Diffusion on CPU. Leave seed blank for random output.",
)
# Launch the Gradio app (blocks and serves locally). The stray " |"
# extraction residue after this call was a syntax error and is removed.
interface.launch()