# MySpace4 / app.py — Hugging Face Space (author: angolinn, commit dfb94f7, "Create app.py")
from diffusers import DiffusionPipeline
import torch
import gradio as gr
# Load the model once at import time.
# On a CUDA machine, load float16 weights for memory efficiency; on CPU,
# fall back to the default float32 weights (fp16 is poorly supported on CPU).
# Checking torch.cuda.is_available() up front avoids downloading/loading the
# fp16 checkpoint on a CPU-only host only to fail at .to("cuda") and reload.
try:
    if not torch.cuda.is_available():
        # Jump straight to the CPU fallback below.
        raise RuntimeError("CUDA is not available")
    pipeline = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipeline.to("cuda")  # Move model to GPU
except Exception as e:
    print(f"Could not load model with float16 or move to CUDA, trying CPU: {e}")
    pipeline = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        use_safetensors=True,
    )
    # No .to("cuda") for CPU — inference runs in float32 on the CPU.
def generate_image(prompt):
    """Run the diffusion pipeline on *prompt* and return the first generated image."""
    result = pipeline(prompt)
    return result.images[0]
# Create Gradio interface: one multi-line text box in, one image out.
# generate_image is called once per submission with the textbox contents.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="image",
    title="My Hugging Face Image Generator",
    description="Generate images from text prompts using a pre-trained Stable Diffusion model on Hugging Face."
)
# Start the Gradio web server only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()