| import torch |
| from diffusers import StableDiffusion3Pipeline |
| from huggingface_hub import login |
| import os |
| import gradio as gr |
|
|
| |
# Authenticate against the Hugging Face Hub using the token that the Space
# exposes as the HF_TOKEN repository secret; fail fast when it is missing.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")
login(token=hf_token)
|
|
| |
model_id = "stabilityai/stable-diffusion-3.5-large"

# float16 kernels (layernorm, some matmuls) are not implemented for CPU in
# PyTorch, so a fp16 pipeline on "cpu" fails at inference time. Use fp16 only
# when CUDA is available and fall back to float32 on CPU.
if torch.cuda.is_available():
    _device, _dtype = "cuda", torch.float16
else:
    _device, _dtype = "cpu", torch.float32

pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=_dtype)
pipe.to(_device)
|
|
| |
| lora_model_path = "./lora_model.pth" |
|
|
| |
def load_lora_model(pipe, lora_model_path):
    """Merge additive LoRA weight deltas into the pipeline's denoiser in place.

    Stable Diffusion 3 pipelines expose their denoiser as ``pipe.transformer``
    (an MMDiT), not ``pipe.unet``; the original ``pipe.unet`` lookup raised
    AttributeError on SD3. We prefer ``transformer`` and fall back to ``unet``
    so UNet-based pipelines keep working.

    Args:
        pipe: A diffusers pipeline (any object exposing a ``transformer`` or
            ``unet`` attribute that is a ``torch.nn.Module``).
        lora_model_path: Path to a ``.pth`` file holding a dict that maps
            parameter names to additive delta tensors.

    Returns:
        The same ``pipe`` instance, with matching parameters updated in place.

    Raises:
        AttributeError: If the pipeline has neither ``transformer`` nor ``unet``.
        FileNotFoundError: If ``lora_model_path`` does not exist.
    """
    # weights_only=True restricts unpickling to tensors/containers, preventing
    # arbitrary code execution from a malicious checkpoint file.
    lora_weights = torch.load(lora_model_path, map_location="cpu", weights_only=True)

    backbone = getattr(pipe, "transformer", None)
    if backbone is None:
        backbone = getattr(pipe, "unet", None)
    if backbone is None:
        raise AttributeError("Pipeline has neither a 'transformer' nor a 'unet' module.")

    # no_grad keeps autograd from tracking the in-place merge.
    with torch.no_grad():
        for name, param in backbone.named_parameters():
            if name in lora_weights:
                # The checkpoint stores deltas, not full weights: add them on,
                # casting to the parameter dtype (e.g. fp16 model, fp32 deltas).
                param.data += lora_weights[name].to(dtype=param.dtype)

    return pipe
|
|
| |
| pipe = load_lora_model(pipe, lora_model_path) |
|
|
| |
def generate_image(prompt, seed=None):
    """Generate a 512x512 image from *prompt* using the module-level pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        seed: Optional seed for reproducible output. Gradio's ``gr.Number``
            widget delivers a float, so the value is cast to ``int`` here —
            ``torch``'s seeding APIs reject floats with a TypeError.

    Returns:
        The first image produced by the pipeline (a PIL image).
    """
    generator = None
    if seed is not None:
        # Use a dedicated Generator instead of torch.manual_seed so we do not
        # clobber the process-wide RNG state on every request.
        generator = torch.Generator().manual_seed(int(seed))

    image = pipe(prompt, height=512, width=512, generator=generator).images[0]
    return image
|
|
| |
# Wire the generator into a simple Gradio UI: one prompt box, one optional
# seed field, and an image output.
prompt_input = gr.Textbox(label="Enter your prompt")
seed_input = gr.Number(label="Enter a seed (optional)", value=None)

iface = gr.Interface(
    fn=generate_image,
    inputs=[prompt_input, seed_input],
    outputs="image",
)
iface.launch()