| | import torch |
| | from diffusers import StableDiffusionPipeline |
| | import gradio as gr |
| |
|
| | |
# Hugging Face checkpoint for Stable Diffusion v1.5.
model_id = "runwayml/stable-diffusion-v1-5"

# Load the pipeline in float32 and pin it to the CPU.
# (float32 is the safe dtype for CPU inference — TODO confirm no GPU is ever used here.)
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float32,
).to("cpu")

# Attention slicing trades a little speed for a lower peak-memory footprint.
pipe.enable_attention_slicing()
| |
|
| | |
| | def generate_image(prompt, seed=None): |
| | |
| | if seed is None or seed == "": |
| | |
| | seed = torch.randint(0, 1000000, (1,)).item() |
| | else: |
| | |
| | try: |
| | seed = int(seed) |
| | except ValueError: |
| | |
| | seed = torch.randint(0, 1000000, (1,)).item() |
| |
|
| | |
| | generator = torch.Generator(device="cpu").manual_seed(seed) |
| | |
| | |
| | image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] |
| | |
| | return image, str(seed) |
| |
|
| | |
# Gradio UI: prompt + optional seed in; generated image + echoed seed out.
_inputs = [
    gr.Textbox(label="Prompt", placeholder="Enter your prompt here"),
    gr.Textbox(label="Seed (optional)", placeholder="Leave blank for random"),
]
_outputs = [
    gr.Image(label="Generated Image"),
    gr.Textbox(label="Seed Used"),
]
interface = gr.Interface(
    fn=generate_image,
    inputs=_inputs,
    outputs=_outputs,
    title="Stable Diffusion on CPU with Random Seed",
    description="Generate images with Stable Diffusion on CPU. Leave seed blank for random output.",
)
| |
|
| | |
| | interface.launch() |