import os

import gradio as gr
import torch
from diffusers import StableDiffusion3Pipeline, StableDiffusionPipeline
from huggingface_hub import login
| |
|
| | |
| | token = os.getenv("HF_TOKEN") |
| | if token: |
| | login(token=token) |
| | else: |
| | raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.") |
| |
|
| | |
| | model_id = "stabilityai/stable-diffusion-3.5-large" |
| | pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=torch.float16) |
| | pipe.to("cuda") |
| |
|
| | |
| | lora_model_path = "lora_model.pth" |
| |
|
| | |
| | pipe.load_lora_model(lora_model_path) |
| |
|
| | |
| | def generate_image(prompt): |
| | image = pipe(prompt).images[0] |
| | return image |
| |
|
| | import gradio as gr |
| | iface = gr.Interface(fn=generate_image, inputs="text", outputs="image") |
| | iface.launch() |