| import gradio as gr |
| import torch |
| from diffusers import StableDiffusionPipeline |
| from huggingface_hub import login |
|
|
# Module-level cache for the StableDiffusionPipeline. It starts as None and is
# populated lazily on the first call to setup_model, so the (large) model is
# downloaded and moved to the device only once per process.
pipe = None
|
|
def setup_model(hf_token, prompt):
    """Lazily load the Stable Diffusion pipeline, then generate one image.

    The pipeline is cached in the module-level ``pipe`` global, so the model
    is downloaded/initialized only on the first call; later calls reuse it
    (the token is only needed for that first call).

    Parameters
    ----------
    hf_token : str
        Hugging Face access token used to authenticate the model download.
    prompt : str
        Text prompt describing the image to generate.

    Returns
    -------
    tuple
        ``(status_message, image)`` where ``image`` is the generated
        ``PIL.Image`` on success, or ``None`` when validation, loading,
        or generation fails.
    """
    global pipe

    # Guard clauses: give the UI a clear status instead of a traceback.
    if not prompt or not prompt.strip():
        return "❌ Please enter a prompt.", None

    if pipe is None:
        if not hf_token:
            return "❌ Please enter a Hugging Face token.", None
        try:
            login(hf_token)
            use_cuda = torch.cuda.is_available()
            pipe = StableDiffusionPipeline.from_pretrained(
                "stable-diffusion-v1-5/stable-diffusion-v1-5",
                # `use_auth_token` is deprecated; modern diffusers/huggingface_hub
                # take `token` instead.
                token=hf_token,
                # fp16 only makes sense on GPU; CPU inference needs fp32.
                torch_dtype=torch.float16 if use_cuda else torch.float32,
            )
            pipe = pipe.to("cuda" if use_cuda else "cpu")
        except Exception as e:
            # Don't leave a half-initialized pipeline cached (e.g. if
            # from_pretrained succeeded but .to() failed).
            pipe = None
            return f"❌ Error loading model: {e}", None

    # Guard generation too, so a runtime failure surfaces as a status message
    # in the UI rather than crashing the callback.
    try:
        image = pipe(prompt).images[0]
    except Exception as e:
        return f"❌ Error generating image: {e}", None
    return "✅ Image generated!", image
|
|
| |
def main():
    """Build and launch the Gradio UI (token + prompt in, status + image out)."""
    demo = gr.Interface(
        fn=setup_model,
        inputs=[
            gr.Textbox(label="🔑 Enter Hugging Face Token", type="password"),
            gr.Textbox(label="🎨 Prompt (What do you want to see?)"),
        ],
        outputs=[
            gr.Textbox(label="Status"),
            gr.Image(label="Generated Image"),
        ],
        title="🧠 Stable Diffusion via Gradio",
        description="Enter your Hugging Face token once, then type a prompt to generate images.",
    )
    demo.launch()


# Guard the launch so importing this module (e.g. from tests) does not start
# a web server as a side effect; behavior when run as a script is unchanged.
if __name__ == "__main__":
    main()
|
|