File size: 1,428 Bytes
5db71ea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b33acdf
5db71ea
 
 
 
 
 
b33acdf
 
5db71ea
 
 
 
b33acdf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import gradio as gr
import requests
import io
from PIL import Image
import json
import os
import shutil
import logging
import math 
from tqdm import tqdm
import time
from diffusers import DiffusionPipeline

def run_lora(base_model, lora, prompt, neg_prompt, progress=gr.Progress(track_tqdm=True)):
    """Generate one image from `prompt` with LoRA weights applied to a base SD model.

    Args:
        base_model: One of the UI dropdown values "v1-5", "v2", or "v2-1".
        lora: Uploaded .safetensors file (gradio File object; `.name` is its temp path).
        prompt: Positive text prompt.
        neg_prompt: Negative text prompt.
        progress: Gradio progress tracker; `track_tqdm=True` surfaces the
            diffusers tqdm bars in the UI.

    Returns:
        The first generated image (PIL.Image.Image).

    Raises:
        gr.Error: if `base_model` is not a recognized choice.
    """
    print(f"Inside run_lora, base_model: {base_model}, lora: {lora.name}, prompt: {prompt}, neg_prompt: {neg_prompt}")

    # Dispatch table instead of an if/elif chain. The original left base_repo
    # as None for an unrecognized value, which then crashed deep inside
    # from_pretrained(None) with an opaque error; fail fast with a clear,
    # user-facing message instead.
    repos = {
        "v1-5": "runwayml/stable-diffusion-v1-5",
        "v2-1": "stabilityai/stable-diffusion-2-1",
        "v2": "stabilityai/stable-diffusion-2",
    }
    base_repo = repos.get(base_model)
    if base_repo is None:
        raise gr.Error(f"Unknown base model: {base_model!r}")

    print(f"base_repo: {base_repo}")

    # NOTE(review): the pipeline is re-downloaded/re-built on every call;
    # acceptable for a demo, but a cache keyed on base_repo would avoid it.
    pipeline = DiffusionPipeline.from_pretrained(base_repo)
    pipeline.load_lora_weights(lora.name)
    print(pipeline)

    image = pipeline(prompt, negative_prompt=neg_prompt).images[0]
    return image
            
# Build the Gradio UI: base-model picker, LoRA upload, prompt boxes -> image.
model_choice = gr.Dropdown(
    ["v1-5", "v2", "v2-1"], label="Base Model", info="Stable Diffusion Base Model."
)
lora_upload = gr.File(file_count="single", file_types=[".safetensors"])
prompt_input = gr.Textbox(label="Prompt", show_label=False, placeholder="Type a prompt after selecting a LoRA")
neg_prompt_input = gr.Textbox(label="Negative Prompt", show_label=False, placeholder="Type negative prompt here.")

app = gr.Interface(
    run_lora,
    [model_choice, lora_upload, prompt_input, neg_prompt_input],
    "image",
)

app.launch()