import gradio as gr
import requests
import io
from PIL import Image
import json
import os
import shutil
import logging
import math
from tqdm import tqdm
import time
from diffusers import DiffusionPipeline

# Map the UI dropdown value to its Hugging Face Hub repository id.
# NOTE(review): "runwayml/stable-diffusion-v1-5" has been deprecated on the
# Hub — confirm the repo is still resolvable or switch to a mirror.
BASE_REPOS = {
    "v1-5": "runwayml/stable-diffusion-v1-5",
    "v2-1": "stabilityai/stable-diffusion-2-1",
    "v2": "stabilityai/stable-diffusion-2",
}


def run_lora(base_model, lora, prompt, neg_prompt, progress=gr.Progress(track_tqdm=True)):
    """Generate one image from a Stable Diffusion base model with LoRA weights applied.

    Args:
        base_model: Dropdown key selecting the base checkpoint ("v1-5", "v2", "v2-1").
        lora: Uploaded ``.safetensors`` file (Gradio file object; ``.name`` is its path).
        prompt: Positive text prompt.
        neg_prompt: Negative text prompt.
        progress: Gradio progress tracker mirroring tqdm output from diffusers.

    Returns:
        A PIL image — the first image produced by the pipeline.

    Raises:
        ValueError: If ``base_model`` is not a known key or no LoRA file was uploaded.
    """
    # Fail fast with a clear message instead of passing None downstream,
    # where from_pretrained/load_lora_weights would raise opaque errors.
    if lora is None:
        raise ValueError("Please upload a LoRA .safetensors file.")
    try:
        base_repo = BASE_REPOS[base_model]
    except KeyError:
        raise ValueError(
            f"Unknown base model {base_model!r}; expected one of {sorted(BASE_REPOS)}."
        ) from None

    print(f"Inside run_lora, base_model: {base_model}, lora: {lora.name}, prompt: {prompt}, neg_prompt: {neg_prompt}")
    print(f"base_repo: {base_repo}")

    # Download (or load from cache) the base checkpoint, then layer the
    # user-supplied LoRA weights on top of it.
    pipeline = DiffusionPipeline.from_pretrained(base_repo)
    pipeline.load_lora_weights(lora.name)
    print(pipeline)

    image = pipeline(prompt, negative_prompt=neg_prompt).images[0]
    return image


app = gr.Interface(
    run_lora,
    [
        gr.Dropdown(
            ["v1-5", "v2", "v2-1"],
            label="Base Model",
            info="Stable Diffusion Base Model.",
        ),
        gr.File(file_count="single", file_types=[".safetensors"]),
        gr.Textbox(label="Prompt", show_label=False, placeholder="Type a prompt after selecting a LoRA"),
        gr.Textbox(label="Negative Prompt", show_label=False, placeholder="Type negative prompt here."),
    ],
    "image",
)

app.launch()