add scripts
Browse files- download_from_hf.py +20 -0
- print_model.py +21 -0
- run_sd_with_lora.py +98 -0
- test_configs.py +189 -0
- test_diffusers.py +121 -0
download_from_hf.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Download a selected subset of a Hugging Face model repo to a local folder.

Usage with the mirror endpoint:
    HF_ENDPOINT=https://hf-mirror.com python download_from_hf.py
"""
from pathlib import Path

from huggingface_hub import snapshot_download

# Repo to fetch; swap in the commented id to grab SDXL base instead.
repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
# repo_id = "stabilityai/stable-diffusion-xl-base-1.0"

# Mirror the repo layout under the local models root.
local_path = str(Path("/Users/jeqin/work/code/sd/models") / repo_id)

# allow_patterns restricts the download to the text-encoder weights only;
# ignore_patterns additionally filters out weight/metadata formats we never use.
snapshot_download(
    repo_id,
    local_dir=local_path,
    local_dir_use_symlinks=False,
    ignore_patterns=[
        ".gitattributes", "*.bin", "*.onnx", "*.ckpt", "*.onnx_data", "*.png", "*.jpg", "*.md"
    ],
    allow_patterns=["text_encoder/model.safetensors"],
)
|
print_model.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from safetensors import safe_open
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def print_keys(model_path):
    """Print every tensor key stored in a .safetensors checkpoint.

    Tensors with a non-empty shape are listed as ``key: type, shape``;
    zero-dimensional tensors are printed with their value instead.

    Args:
        model_path: path to a .safetensors file.
    """
    # NOTE: the original version also accumulated every tensor into an unused
    # local dict, keeping the whole checkpoint in memory for nothing; dropped.
    with safe_open(model_path, framework="pt") as f:
        print("key length: ", len(f.keys()))
        for k in f.keys():
            v = f.get_tensor(k)
            if v.shape:
                print(f"{k}: {type(v)}, {v.shape}")
            else:
                print(f"{k}: {v}")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
if __name__ == '__main__':
    # Checkpoint to inspect; swap in the commented path for the SD 2.1 LoRA.
    model = "../models/lora_1.5/ColoringBook-sd15.safetensors"
    # model = "../models/lora_2.1/pytorch_lora_weights-sd21-comfyui.safetensors"
    for step in (print, print_keys):
        step(model)
|
run_sd_with_lora.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generate images from a text prompt with a Stable Diffusion pipeline,
optionally applying a LoRA, and report per-stage timings."""
from diffusers import AutoPipelineForText2Image
from diffusers import TCDScheduler, LCMScheduler
from diffusers.utils import make_image_grid
import torch
from PIL import Image
import time
from pathlib import Path
import argparse

# Apple-silicon GPU backend; this script targets macOS ("mps").
device = torch.device("mps")

# Root folder holding the locally downloaded model repos.
folder = Path("../models/")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate images from a textual prompt using stable diffusion"
    )
    parser.add_argument("prompt")
    parser.add_argument("--model", choices=["sd1.5", "sd2", "realistic", "sdxl", "sdxl-turbo"], default="sdxl")
    parser.add_argument("--n_images", type=int, default=1)
    parser.add_argument("--steps", type=int)
    parser.add_argument("--guidance_scale", type=float)
    parser.add_argument("--negative_prompt", default="")
    parser.add_argument("--output", default="out.png")
    parser.add_argument("--img-size", type=int, default=512)
    parser.add_argument("--lora", type=str, default=None)
    parser.add_argument("--lora-scale", type=float, default=None)
    args = parser.parse_args()

    # Resolve the model path and fill in per-model defaults for any
    # guidance_scale / steps value the user did not pass explicitly.
    if args.model == "sdxl-turbo":
        model_path = folder / "stabilityai/sdxl-turbo"
        args.guidance_scale = args.guidance_scale or 0.0
        args.steps = args.steps or 2
    elif args.model == "sdxl":
        model_path = folder / "stabilityai/stable-diffusion-xl-base-1.0"
        args.guidance_scale = args.guidance_scale or 7.5
        args.steps = args.steps or 20
    elif args.model == "sd2":
        model_path = folder / "stabilityai/stable-diffusion-2-1-base"
        args.guidance_scale = args.guidance_scale or 7.5
        args.steps = args.steps or 20
    elif args.model == "sd1.5":
        model_path = folder / "runwayml/stable-diffusion-v1-5"
        args.guidance_scale = args.guidance_scale or 7.5
        args.steps = args.steps or 20
    elif args.model == "realistic":
        model_path = folder / "SG161222/Realistic_Vision_V3.0_VAE"
        args.guidance_scale = args.guidance_scale or 5  # 3,5-7
        args.steps = args.steps or 10
    else:
        raise ValueError(f"Unknown model: {args.model}")

    # NOTE(review): --negative_prompt is printed below but never passed to the
    # pipeline — confirm whether that is intentional.
    print("*" * 10, "configurations")
    print(f"model: {args.model}\nimage number: {args.n_images}\nsteps: {args.steps}\n"
          f"guidance_scale: {args.guidance_scale}\nnegative_prompt: {args.negative_prompt}\noutput: {args.output}\n"
          f"img-size: {args.img_size}\nlora: {args.lora}\nlora-scale: {args.lora_scale}\nprompt:{args.prompt}\n")
    print("*" * 10)

    t0 = time.time()
    pipe = AutoPipelineForText2Image.from_pretrained(model_path,
                                                     torch_dtype=torch.float16).to(device)
    t1 = time.time()
    print(f"load model time: {(t1 - t0):.3f}")

    # BUG FIX: t2 was previously assigned only inside the LoRA branch, so any
    # run without --lora crashed with NameError at the generate-time print.
    t2 = t1
    if args.lora:
        # pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
        pipe.load_lora_weights(args.lora)
        # pipe.fuse_lora()
        t2 = time.time()
        print(f"load lora time: {(t2 - t1):.3f}")

    # The two original pipe(...) call sites differed only in
    # cross_attention_kwargs; build the arguments once instead.
    gen_kwargs = dict(
        prompt=args.prompt,
        height=args.img_size,
        width=args.img_size,
        num_inference_steps=args.steps,
        num_images_per_prompt=args.n_images,
        guidance_scale=args.guidance_scale,
        # generator=torch.Generator(device="mps").manual_seed(0)
    )
    # BUG FIX: `if args.lora_scale:` treated an explicit 0.0 as "unset".
    if args.lora_scale is not None:
        gen_kwargs["cross_attention_kwargs"] = {'scale': args.lora_scale}
    output = pipe(**gen_kwargs)
    t3 = time.time()
    print(f"generate image time: {(t3 - t2):.3f}")

    # Stitch all generated images into one horizontal grid and save it.
    img = make_image_grid(output.images, rows=1, cols=args.n_images)
    img.save(args.output)
    print(f"save image to: {args.output}")
    print(f"output image size: {img.size}")
    print(f"total time: {(time.time() - t0):.3f}")
|
test_configs.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared benchmark configuration: output folder, prompts, per-model settings."""
from pathlib import Path

# Directory collecting the generated benchmark images.
output = Path("./base_model_images")
# exist_ok makes a prior exists() check redundant.
output.mkdir(exist_ok=True, parents=True)

# Prompt id -> prompt text; every model under test runs all of them.
prompts = {
    0: "astronaut riding a horse",
    1: "a cute corgi",
    2: "A cinematic shot of a baby racoon wearing an intricate italian priest robe",
    3: "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hair",
    4: "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
}

# Observed settings (from prior experiments recorded in this file):
#   native img size:  sd1.5 512, sd2 768, realistic 512, sdxl 1024, sdxl-turbo 1024
#   tested img size:  sd1.5 512, sd2 512, realistic 512, sdxl 1024, sdxl-turbo 512
#   model(guidance_scale, steps): sd1.5(7.5, 20-50), sd2(7.5, 20-50),
#   realistic(3-5, 10), sdxl(7.5, 20-50), sdxl-turbo(0.0, 2-5)
#
# Each model entry maps to extra CLI parameters for run_sd_with_lora.py and
# may optionally define (see test_diffusers.test_lora):
#   "steps": int, "guidance_scale": float, "n_images": int,
#   "loras": [{"lora": "<path to .safetensors>",
#              "trigger_words": "<prompt suffix>",
#              "lora-scale": <float, optional>}, ...]
# The previous large commented-out per-model LoRA lists were removed as dead
# configuration; recover concrete LoRA paths from version history if needed.
base_models = {
    "sd1.5": {
        "img-size": 512,
    },
    "sd2": {
        "img-size": 512,
    },
    "realistic": {
        "img-size": 512,
    },
    "sdxl": {
        "img-size": 1024,
    },
    "sdxl-turbo": {
        "img-size": 512,
    },
}
|
test_diffusers.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import re
|
| 3 |
+
import subprocess
|
| 4 |
+
import csv
|
| 5 |
+
from subprocess import CompletedProcess
|
| 6 |
+
|
| 7 |
+
from test_configs import *
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def cmd(command: str, check=True, capture_output=False) -> CompletedProcess:
    """Run *command* through the shell, echoing the command line first.

    Args:
        command: shell command line to execute.
        check: raise CalledProcessError on a non-zero exit status.
        capture_output: when True, merge stderr into stdout and capture it as
            text on the returned object; otherwise the child inherits our
            streams.

    Returns:
        The CompletedProcess of the finished command.
    """
    print(command)
    if capture_output:
        ret = subprocess.run(command, shell=True, check=check, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             universal_newlines=True)
        # Only captured runs have stdout; previously this printed "None"
        # for non-captured runs.
        print(ret.stdout)
    else:
        ret = subprocess.run(command, shell=True, check=check)
    return ret
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def parse_log(output):
    """Parse the stdout of one run_sd_with_lora.py invocation.

    Returns a tuple of strings in CSV-row order:
    (model, steps, guidance_scale, img_size, n_images, load_model_time,
     load_lora_time, generate_time, total_time, out_image, out_image_size).
    Raises AttributeError when an expected line is missing from the log.
    """
    def field(pattern):
        # Every metric is logged as "<label>: <value>" on its own line.
        return re.search(pattern, output).group(1)

    # Keep only the last two path components of the saved image path.
    out_image = '/'.join(field(r"save image to: (.+)").split("/")[-2:])
    # "(W, H)" -> "W*H" for compact CSV cells.
    out_image_size = field(r"output image size: \((.+)\)").replace(', ', '*')
    return (
        field(r"model: (.+)"),
        field(r"steps: (.+)"),
        field(r"guidance_scale: (.+)"),
        field(r"img-size: (.+)"),
        field(r"image number: (.+)"),
        field(r"load model time: (.+)"),
        field(r"load lora time: (.+)"),
        field(r"generate image time: (.+)"),
        field(r"total time: (.+)"),
        out_image,
        out_image_size,
    )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _get_cmd(prompt, **kwargs):
|
| 42 |
+
base_cmd = f'python run_sd_with_lora.py "{prompt}"'
|
| 43 |
+
for k, v in kwargs.items():
|
| 44 |
+
base_cmd += f" --{k} {v}"
|
| 45 |
+
return base_cmd
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_lora(result):
    """Benchmark every configured LoRA of every base model.

    For each (model, lora, prompt) combination a run_sd_with_lora.py command
    is generated — currently only the "with trigger words" variant is active —
    then executed, and its parsed metrics appended to *result*.

    Args:
        result: list of CSV rows to extend.

    Returns:
        The same *result* list.
    """
    commands = {
        "no_lora": [],
        "no_trigger": [],
        "with_trigger": []
    }
    for model, config in base_models.items():
        # BUG FIX: the original `config.pop("loras")` mutated the shared
        # base_models dict and raised KeyError for models with no "loras"
        # entry (true of the current test_configs). Work on a copy and
        # default to an empty list.
        config = dict(config)
        loras = config.pop("loras", [])
        for l in loras:
            trigger_words = l.get("trigger_words")
            # e.g. ".../ColoringBook-sd15.safetensors" -> "ColoringBook-sd15"
            lora_name = l.get("lora").split('/')[-1].split('.')[0]
            for i, p in prompts.items():
                # Variants 1 ("no lora") and 2 ("lora, no trigger words")
                # were disabled in the original script and stay disabled.
                # 3. run with lora, with trigger words
                paras = {"model": model, "output": str(output / f"{model}-{lora_name}-{i}-c_with_trigger.png"),
                         "n_images": 4}
                paras.update(config)
                paras["lora"] = l.get("lora")
                if l.get("lora-scale"):
                    paras["lora-scale"] = l.get("lora-scale")
                p = f"{p}, {trigger_words}"
                commands["with_trigger"].append(_get_cmd(p, **paras))

    # Run all collected commands; a single failed generation is logged and
    # skipped rather than aborting the whole benchmark.
    for cmds in commands.values():
        for c in cmds:
            try:
                ret = cmd(c, capture_output=True)
                result.append(parse_log(ret.stdout))
            except Exception as e:
                print("Exception: ", e)
    return result
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_base_model(result: list):
    """Run every base model over every prompt and collect parsed metrics.

    Args:
        result: list of CSV rows; each successfully parsed run is appended.

    Returns:
        The same *result* list.
    """
    for model, config in base_models.items():
        for i, p in prompts.items():
            # Defaults first; per-model settings from test_configs override.
            paras = {
                "model": model,
                "output": str(output / f"{model}_{i}.png"),
                "n_images": 4,
                **config,
            }
            command = _get_cmd(p, **paras)
            try:
                run = cmd(command, capture_output=True)
                result.append(parse_log(run.stdout))
            except Exception as exc:
                # A failed generation is logged and skipped, not fatal.
                print("Exception: ", exc)
    return result
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def main():
    """Benchmark the configured models and write the metrics to a CSV file."""
    header = ['model name', 'steps', 'guidance scale', 'img size', 'img number', 'load model', 'update lora',
              'generate image', 'total time', 'output image', 'output image size']
    result = test_base_model([header])
    # result = test_lora(result)

    with open("result_diffuser.csv", 'w', newline='') as f:
        csv.writer(f).writerows(result)


if __name__ == '__main__':
    main()
|