| import torch |
| from diffusers import StableDiffusion3Pipeline |
| from huggingface_hub import login |
| import os |
| import gradio as gr |
|
|
| |
# Select the compute device: prefer CUDA when a GPU is present.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("GPU is available" if device == "cuda" else "GPU is not available, using CPU")
|
|
| |
# Authenticate against the Hugging Face Hub; the token must be provided
# via the HF_TOKEN environment variable (a Space repository secret).
token = os.getenv("HF_TOKEN")
if not token:
    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")
login(token=token)
|
|
| |
model_id = "stabilityai/stable-diffusion-3.5-large"

# Half precision on GPU keeps VRAM usage manageable; default (full) precision on CPU.
load_kwargs = {"torch_dtype": torch.float16} if device == "cuda" else {}
pipe = StableDiffusion3Pipeline.from_pretrained(model_id, **load_kwargs)
pipe.to(device)
|
|
| |
# Path to a LoRA checkpoint: a state-dict of additive weight deltas keyed by
# backbone parameter names (see load_lora_model below).
lora_model_path = "./lora_model.pth"
|
|
| |
def load_lora_model(pipe, lora_model_path):
    """Merge additive LoRA weight deltas from *lora_model_path* into *pipe*.

    The checkpoint is expected to be a plain state-dict mapping backbone
    parameter names to delta tensors, which are added in place to the
    matching parameters. Returns the (mutated) pipeline.
    """
    # weights_only=True prevents arbitrary code execution from an untrusted
    # pickle; load to CPU and cast per-parameter below, so no global state
    # is needed here.
    lora_weights = torch.load(lora_model_path, map_location="cpu", weights_only=True)

    # SD3 pipelines carry their denoising backbone as `transformer`; older
    # Stable Diffusion pipelines use `unet`. The original code only checked
    # `unet`, so the merge silently never happened for SD3.
    backbone = getattr(pipe, "transformer", None) or getattr(pipe, "unet", None)
    if backbone is None:
        print("The model doesn't have 'transformer' or 'unet' attributes. Please check the model structure.")
        return pipe

    applied = 0
    with torch.no_grad():  # in-place weight edits must not be tracked by autograd
        for name, param in backbone.named_parameters():
            if name in lora_weights:
                param.data += lora_weights[name].to(device=param.device, dtype=param.dtype)
                applied += 1
    print(f"Applied LoRA deltas to {applied} parameter tensor(s).")

    return pipe
|
|
| |
# Merge the LoRA deltas into the pipeline's backbone weights in place.
pipe = load_lora_model(pipe, lora_model_path)
|
|
| |
def generate_image(prompt, seed=None):
    """Generate one 512x512 image for *prompt*.

    seed: optional reproducibility seed. Gradio's Number component delivers
    a float, and torch.manual_seed() rejects floats, so cast to int first.
    """
    generator = torch.manual_seed(int(seed)) if seed is not None else None

    image = pipe(prompt, height=512, width=512, generator=generator).images[0]
    return image
|
|
| |
# Build the Gradio UI: a prompt box plus an optional seed, rendering one image.
prompt_input = gr.Textbox(label="Enter your prompt")
seed_input = gr.Number(label="Enter a seed (optional)", value=None)

iface = gr.Interface(
    fn=generate_image,
    inputs=[prompt_input, seed_input],
    outputs="image",
)
iface.launch()
|
|