| | from diffusers import DiffusionPipeline |
| | import torch |
| | import gradio as gr |
| |
|
| | |
| | |
| | |
| | |
def _load_cpu_pipeline():
    """Load the SDXL base model in default (float32) precision for CPU use.

    float16 is poorly supported on CPU, so the dtype override is omitted here.
    """
    return DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        use_safetensors=True,
    )


# Load the Stable Diffusion XL base pipeline once at import time so every
# Gradio request reuses the same model. NOTE: this downloads the checkpoint
# on first run (network I/O, several GB).
if torch.cuda.is_available():
    try:
        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            torch_dtype=torch.float16,  # half precision halves GPU memory use
            use_safetensors=True,
        )
        pipeline.to("cuda")
    except Exception as e:
        # Broad catch is deliberate at this top-level boundary: any failure
        # (OOM, driver issue, download error) falls back to the CPU path.
        print(f"Could not load model with float16 or move to CUDA, trying CPU: {e}")
        pipeline = _load_cpu_pipeline()
else:
    # Checking availability up front avoids loading the float16 checkpoint
    # only to fail at .to("cuda") and re-download the model a second time.
    print("CUDA is not available; loading the model on CPU.")
    pipeline = _load_cpu_pipeline()
| | |
| |
|
def generate_image(prompt):
    """Run the diffusion pipeline on *prompt* and return the first image.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        A PIL image produced by the globally loaded ``pipeline``.
    """
    result = pipeline(prompt)
    return result.images[0]
| |
|
| | |
# Build the Gradio UI: a single multi-line prompt box feeding the image
# generator, with the result rendered as an image component.
prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")

iface = gr.Interface(
    fn=generate_image,
    inputs=prompt_box,
    outputs="image",
    title="My Hugging Face Image Generator",
    description=(
        "Generate images from text prompts using a pre-trained "
        "Stable Diffusion model on Hugging Face."
    ),
)
| |
|
| | |
| | if __name__ == "__main__": |
| | iface.launch() |