Upload lora-scripts/sd-scripts/tools/cache_latents.py with huggingface_hub
lora-scripts/sd-scripts/tools/cache_latents.py
ADDED
@@ -0,0 +1,197 @@
# cache latents to disk

import argparse
import math
from multiprocessing import Value
import os

from accelerate.utils import set_seed
import torch
from tqdm import tqdm

from library import config_util
from library import train_util
from library import sdxl_train_util
from library.config_util import (
    ConfigSanitizer,
    BlueprintGenerator,
)
from library.utils import setup_logging
setup_logging()
import logging
logger = logging.getLogger(__name__)

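# Example invocation (a sketch: the flags come from the shared sd-scripts argument
# helpers registered in setup_parser below, and the paths are placeholders):
#
#   python tools/cache_latents.py \
#       --pretrained_model_name_or_path /path/to/model.safetensors \
#       --train_data_dir /path/to/images \
#       --resolution 512,512 \
#       --cache_latents_to_disk \
#       --mixed_precision fp16
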
def cache_to_disk(args: argparse.Namespace) -> None:
    train_util.prepare_dataset_args(args, True)

    # check the cache-latents argument
    assert args.cache_latents_to_disk, "cache_latents_to_disk must be True"

    use_dreambooth_method = args.in_json is None

    if args.seed is not None:
        set_seed(args.seed)  # initialize the random number generators

    # prepare the tokenizer(s): required to run the dataset
    if args.sdxl:
        tokenizer1, tokenizer2 = sdxl_train_util.load_tokenizers(args)
        tokenizers = [tokenizer1, tokenizer2]
    else:
        tokenizer = train_util.load_tokenizer(args)
        tokenizers = [tokenizer]

    # prepare the dataset
    if args.dataset_class is None:
        blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, False, True))
        if args.dataset_config is not None:
            logger.info(f"Load dataset config from {args.dataset_config}")
            user_config = config_util.load_user_config(args.dataset_config)
            ignored = ["train_data_dir", "in_json"]
            if any(getattr(args, attr) is not None for attr in ignored):
                logger.warning(
                    "ignoring the following options because a config file was found: {0}".format(", ".join(ignored))
                )
        else:
            if use_dreambooth_method:
                logger.info("Using DreamBooth method.")
                user_config = {
                    "datasets": [
                        {
                            "subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(
                                args.train_data_dir, args.reg_data_dir
                            )
                        }
                    ]
                }
            else:
                logger.info("Training with captions.")
                user_config = {
                    "datasets": [
                        {
                            "subsets": [
                                {
                                    "image_dir": args.train_data_dir,
                                    "metadata_file": args.in_json,
                                }
                            ]
                        }
                    ]
                }

        blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizers)
        train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    else:
        train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizers)

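    # Note: the dataset group above comes from --dataset_config (TOML), DreamBooth-style
    # subdirectories, or a caption metadata JSON (--in_json). A minimal TOML sketch, for
    # illustration only (not a file shipped with this repo):
    #
    #   [[datasets]]
    #     [[datasets.subsets]]
    #     image_dir = "/path/to/images"
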
    # if the dataset's cache_latents is not called, raw images are returned

    current_epoch = Value("i", 0)
    current_step = Value("i", 0)
    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

    # prepare the accelerator
    logger.info("prepare accelerator")
    accelerator = train_util.prepare_accelerator(args)

    # prepare dtypes for mixed precision and cast as appropriate
    weight_dtype, _ = train_util.prepare_dtype(args)
    vae_dtype = torch.float32 if args.no_half_vae else weight_dtype

    # load the model
    logger.info("load model")
    if args.sdxl:
        (_, _, _, vae, _, _, _) = sdxl_train_util.load_target_model(args, accelerator, "sdxl", weight_dtype)
    else:
        _, vae, _, _ = train_util.load_target_model(args, weight_dtype, accelerator)

    if torch.__version__ >= "2.0.0":  # usable with xformers builds for PyTorch 2.0.0 and later
        vae.set_use_memory_efficient_attention_xformers(args.xformers)
    vae.to(accelerator.device, dtype=vae_dtype)
    vae.requires_grad_(False)
    vae.eval()

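    # note: --no_half_vae keeps the VAE in float32 even under fp16/bf16 mixed precision;
    # fp16 VAEs (notably SDXL's) are known to produce NaN latents without it.
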
    # prepare the dataloader
    train_dataset_group.set_caching_mode("latents")

    # number of DataLoader workers: note that persistent_workers cannot be used when it is 0
    n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # cpu_count or max_data_loader_n_workers

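    # batch_size=1 is intentional: each item the dataset group yields is already a full
    # bucket batch, so the DataLoader only adds shuffling and worker prefetching.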
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group,
        batch_size=1,
        shuffle=True,
        collate_fn=collator,
        num_workers=n_workers,
        persistent_workers=args.persistent_data_loader_workers,
    )

    # prepare with the accelerator: this should enable multi-GPU use
    train_dataloader = accelerator.prepare(train_dataloader)

    # loop over the data
    for batch in tqdm(train_dataloader):
        b_size = len(batch["images"])
        vae_batch_size = b_size if args.vae_batch_size is None else args.vae_batch_size
        flip_aug = batch["flip_aug"]
        random_crop = batch["random_crop"]
        bucket_reso = batch["bucket_reso"]

        # process the batch in chunks of vae_batch_size
        for i in range(0, b_size, vae_batch_size):
            images = batch["images"][i : i + vae_batch_size]
            absolute_paths = batch["absolute_paths"][i : i + vae_batch_size]
            resized_sizes = batch["resized_sizes"][i : i + vae_batch_size]

            image_infos = []
            for image, absolute_path, resized_size in zip(images, absolute_paths, resized_sizes):
                image_info = train_util.ImageInfo(absolute_path, 1, "dummy", False, absolute_path)
                image_info.image = image
                image_info.bucket_reso = bucket_reso
                image_info.resized_size = resized_size
                image_info.latents_npz = os.path.splitext(absolute_path)[0] + ".npz"

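                # is_disk_cached_latents_is_expected verifies the existing .npz against the
                # expected bucket resolution (including the flipped copy when flip_aug is on),
                # so stale or mismatched caches are regenerated rather than skipped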
                if args.skip_existing:
                    if train_util.is_disk_cached_latents_is_expected(image_info.bucket_reso, image_info.latents_npz, flip_aug):
                        logger.warning(f"Skipping {image_info.latents_npz} because it already exists.")
                        continue

                image_infos.append(image_info)

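            # cache_batch_latents encodes the collected images with the VAE and saves one
            # .npz per image (True = write to disk; a flipped copy is added when flip_aug is on)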
            if len(image_infos) > 0:
                train_util.cache_batch_latents(vae, True, image_infos, flip_aug, random_crop)

    accelerator.wait_for_everyone()
    accelerator.print(f"Finished caching latents for {len(train_dataset_group)} batches.")


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()

    train_util.add_sd_models_arguments(parser)
    train_util.add_training_arguments(parser, True)
    train_util.add_dataset_arguments(parser, True, True, True)
    config_util.add_config_arguments(parser)
    parser.add_argument("--sdxl", action="store_true", help="use SDXL model")
    parser.add_argument(
        "--no_half_vae",
        action="store_true",
        help="do not use fp16/bf16 VAE in mixed precision (use float VAE)",
    )
    parser.add_argument(
        "--skip_existing",
        action="store_true",
        help="skip images whose npz already exists (when flip_aug is enabled, both the normal and the flipped npz must exist)",
    )
    return parser


if __name__ == "__main__":
    parser = setup_parser()

    args = parser.parse_args()
    args = train_util.read_config_from_file(args, parser)

    cache_to_disk(args)
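
# When launched with `accelerate launch` (optional), the accelerator-prepared dataloader
# shards batches across processes, so multiple GPUs can split the caching work; the
# wait_for_everyone() call above synchronizes them before the final message.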