# Hugging Face Space: Stable Diffusion text-to-image demo (Space status: Sleeping)
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from transformers import logging

# Suppress transformers warnings, including cache-migration notices.
logging.set_verbosity_error()
# Load the fine-tuned Stable Diffusion model.
model_id = "MostafaAly/stable-diffusion-finetuned"

# Choose the device first so we can pick a matching dtype: float16 is only
# reliably supported on GPU; on CPU it errors or silently misbehaves, so
# fall back to float32 there.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
pipe.to(device)
print(f"Using device: {device}")
def generate_image(prompt):
    """Generate an image from a text prompt.

    Args:
        prompt: Text description of the desired image.

    Returns:
        The first image (a PIL.Image) produced by the module-level
        Stable Diffusion pipeline ``pipe``.
    """
    return pipe(prompt).images[0]
# Build the Gradio UI: one text prompt in, one generated image out.
interface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(label="Generated Image"),
)

# Guard the launch so the module can be imported without starting a server.
if __name__ == "__main__":
    interface.launch()