# HuggingFace Spaces app (was previously failing with "Runtime error" —
# see load_model: the pipeline was reloaded on every click).
# Streamlit UI for Stable Diffusion text-to-image generation.
import streamlit as st
import torch
from diffusers import StableDiffusionPipeline
from PIL import Image
@st.cache_resource
def load_model():
    """Load the Stable Diffusion v1.4 pipeline, cached for the app's lifetime.

    The original version reloaded the multi-gigabyte pipeline on every button
    click; ``st.cache_resource`` makes Streamlit load it once per process and
    reuse it across reruns, which avoids out-of-memory / timeout failures.

    Returns:
        The pipeline moved to GPU when CUDA is available (CPU otherwise),
        or ``None`` if loading fails (the error is shown in the UI).
    """
    # Prefer the GPU when available; CPU inference works but is very slow.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    st.write(f"Using device: {device}")
    try:
        # Downloads the weights on first run, then serves from the local cache.
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        pipe.to(device)  # Move the model to the chosen device
        return pipe
    except Exception as e:
        # Best-effort boundary: surface the failure in the UI instead of crashing.
        st.write(f"Error loading model: {e}")
        return None
def generate_image(pipe, prompt):
    """Run the pipeline on ``prompt`` and return the first generated image.

    Any failure during generation is reported in the Streamlit UI and
    ``None`` is returned instead of raising.
    """
    try:
        result = pipe(prompt)
        return result.images[0]
    except Exception as e:
        # Best-effort boundary: show the error in the UI, signal failure to caller.
        st.write(f"Error generating image: {e}")
        return None
# --- Streamlit UI ---
st.title("Stable Diffusion Image Generation")
st.write("Enter a prompt and generate an image based on it.")

# User input for the prompt
prompt = st.text_input("Enter your image prompt:")

if st.button("Generate Image"):
    if prompt:
        st.write("Generating image... Please wait.")
        # Cached after the first call, so repeated clicks do not reload the model.
        pipe = load_model()
        # Compare against None explicitly: relying on the truthiness of a
        # pipeline / PIL image object is fragile.
        if pipe is not None:
            image = generate_image(pipe, prompt)
            if image is not None:
                st.image(image, caption="Generated Image", use_column_width=True)
                st.write("Image generated successfully.")
            else:
                st.write("Image generation failed.")
        else:
            st.write("Failed to load model.")
    else:
        st.write("Please enter a valid prompt.")