# Hugging Face Space page residue (status: "Sleeping") — not part of the app code.
import os
import torch
import gradio as gr
from diffusers import StableDiffusionPipeline

# Model repo id; override with the MODEL_ID environment variable.
MODEL_ID = os.getenv("MODEL_ID", "stabilityai/stable-diffusion-2-1")
# Prefer GPU when available; otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# -------------------------
# Load Model
# -------------------------
def load_pipeline():
    """Build the Stable Diffusion pipeline for MODEL_ID and move it to DEVICE."""
    print(f"Loading model: {MODEL_ID} on {DEVICE}")
    # Half precision halves GPU memory; CPU inference needs full float32.
    dtype = torch.float16 if DEVICE == "cuda" else torch.float32
    pipeline = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=dtype)
    return pipeline.to(DEVICE)
# Load once at import time so every Gradio request reuses the same pipeline.
pipe = load_pipeline()

# -------------------------
# Inference Function
# -------------------------
def generate(prompt):
    """Run Stable Diffusion on *prompt* and return (status message, image).

    Returns a user-facing message and ``None`` for the image when the prompt
    is empty/blank or when inference raises, so the Gradio handler never
    surfaces an unhandled exception to the UI.
    """
    # Guard clause: reject None, empty, or whitespace-only prompts.
    if not prompt or not prompt.strip():
        return "Please enter a valid prompt.", None
    print("Running inference...")
    try:
        result = pipe(
            prompt=prompt,
            num_inference_steps=25,  # default quality/speed trade-off
            guidance_scale=7.5,      # standard CFG scale
        )
    except Exception as exc:
        # Report failures (OOM, model errors) in the Status box instead of crashing.
        return f"Generation failed: {exc}", None
    image = result.images[0]
    return f"Generated image for: {prompt}", image
# -------------------------
# Gradio UI
# -------------------------
# One textbox in; a status message and the generated image out.
prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your image prompt...")
status_output = gr.Textbox(label="Status")
image_output = gr.Image(label="Generated Image")

interface = gr.Interface(
    fn=generate,
    inputs=prompt_input,
    outputs=[status_output, image_output],
    title="Prompt Image Editor",
    description="Generate AI images using text prompts.",
)

if __name__ == "__main__":
    interface.launch()