"""Streamlit app: generate images from text prompts with Stable Diffusion 2.1."""

from diffusers import StableDiffusionPipeline
from PIL import Image
import streamlit as st
import torch
from transformers import CLIPTextModel, CLIPFeatureExtractor
import numpy as np
import matplotlib.pyplot as plt


# NOTE: st.cache(allow_output_mutation=True) was deprecated and later removed;
# st.cache_resource is the supported API for caching heavyweight, non-serializable
# objects such as a model pipeline across Streamlit reruns.
@st.cache_resource
def load_model():
    """Load and cache the Stable Diffusion 2.1 pipeline.

    Returns:
        StableDiffusionPipeline moved to GPU when available, otherwise CPU.
    """
    # BUG FIX: the 2.1 checkpoint is published under the `stabilityai` org;
    # "CompVis/stable-diffusion-2-1" does not exist on the Hugging Face Hub.
    model_id = "stabilityai/stable-diffusion-2-1"

    # Pick the device first so the dtype can follow it: float16 inference is
    # only supported on GPU — on CPU it fails or degrades, so fall back to fp32.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
    pipe.to(device)
    return pipe


def main():
    """Render the UI, collect generation parameters, and display the result."""
    st.title("Stable Diffusion 2.1 Image Generator")
    st.header("Generate images from your prompts!")

    prompt = st.text_input("Enter your prompt:", "")
    steps = st.slider("Number of steps:", min_value=20, max_value=200, value=50)
    guidance = st.slider("Guidance scale:", min_value=1.0, max_value=20.0, value=7.5)

    if st.button("Generate"):
        # .strip() so a whitespace-only prompt is also rejected.
        if not prompt.strip():
            st.error("Please enter a prompt")
            return
        with st.spinner("Generating image..."):
            pipe = load_model()
            image = pipe(
                prompt,
                guidance_scale=guidance,
                num_inference_steps=steps,
            ).images[0]
            st.image(image, caption="Generated Image")


if __name__ == "__main__":
    main()