| import torch |
| import spaces |
| from diffusers import StableDiffusion3Pipeline |
| from huggingface_hub import login |
| import os |
| import gradio as gr |
|
|
| |
# Authenticate against the Hugging Face Hub with a token supplied via the
# HF_TOKEN environment variable (configured as a Space repository secret).
token = os.getenv("HF_TOKEN")
if not token:
    # Guard clause: fail fast with a clear setup instruction when the secret is missing.
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")
login(token=token)
|
|
| |
# Load the Stable Diffusion 3.5 Large pipeline and move it to the best
# available device (GPU when present, CPU otherwise).
model_id = "stabilityai/stable-diffusion-3.5-large"
pipe = StableDiffusion3Pipeline.from_pretrained(model_id)

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)  # .to() mutates the pipeline in place and returns it

# Path to the locally stored additive LoRA weight deltas (a torch state-dict).
lora_model_path = "./lora_model.pth"
|
|
| |
def load_lora_model(pipe, lora_model_path):
    """Merge additive LoRA-style weight deltas into the pipeline's transformer.

    Loads a state-dict of per-parameter delta tensors from ``lora_model_path``
    and adds each delta in place to the transformer parameter of the same
    name. Keys that do not match any transformer parameter are ignored.

    Args:
        pipe: A diffusers pipeline expected to expose a ``transformer``
            module and a ``device`` property.
        lora_model_path: Path to a ``torch.save``-ed dict mapping parameter
            names to delta tensors.

    Returns:
        The same pipeline instance, with its weights updated in place.
    """
    # weights_only=True prevents arbitrary-code execution from an untrusted
    # checkpoint; load straight onto the pipeline's own device.
    lora_weights = torch.load(lora_model_path, map_location=pipe.device, weights_only=True)

    transformer = getattr(pipe, "transformer", None)
    if transformer is None:
        # Best-effort, as in the original: warn and return the pipe unchanged.
        # (Checking up front means no parameter is half-applied on failure.)
        print("The model doesn't have 'transformer' attributes. Please check the model structure.")
        return pipe

    # Apply the deltas without autograd tracking; the delta is moved to the
    # parameter's device/dtype first. Shapes are assumed to match exactly
    # (no broadcasting) — TODO confirm against the LoRA export.
    with torch.no_grad():
        for name, param in transformer.named_parameters():
            if name in lora_weights:
                param.add_(lora_weights[name].to(device=param.device, dtype=param.dtype))

    return pipe
|
|
| |
# Merge the local LoRA weight deltas into the freshly loaded pipeline
# (the function mutates the pipeline in place and returns it).
pipe = load_lora_model(pipe, lora_model_path)
|
|
| |
# NOTE: the ZeroGPU decorator is spaces.GPU (uppercase); `spaces.gpu` raises
# AttributeError at import time.
@spaces.GPU
def generate(prompt, seed=None):
    """Generate a 512x512 image from *prompt* with the global SD3.5 pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        seed: Optional seed for reproducible output. Gradio's Number input
            delivers floats, so the value is coerced to int before seeding.

    Returns:
        The first generated PIL image, for display in the Gradio UI.
    """
    # torch.manual_seed requires an int and returns the (seeded) global
    # torch.Generator; with no seed we let the pipeline use default RNG.
    generator = torch.manual_seed(int(seed)) if seed is not None else None
    image = pipe(prompt, height=512, width=512, generator=generator).images[0]
    return image
|
|
| |
# Minimal Gradio UI: a prompt plus an optional integer seed -> generated image.
iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Enter your prompt"),
        # precision=0 makes Gradio deliver the seed as an int (or None),
        # which is what torch.manual_seed expects.
        gr.Number(label="Enter a seed (optional)", value=None, precision=0),
    ],
    outputs="image",
)
iface.launch()
|
|