Files upload
Browse files- ip_ckpt/README.md +109 -0
- ip_ckpt/lyra_tran/models/image_encoder/config.json +23 -0
- ip_ckpt/lyra_tran/models/image_encoder/model.safetensors +3 -0
- ip_ckpt/lyra_tran/models/image_encoder/pytorch_model.bin +3 -0
- ip_ckpt/lyra_tran/sdxl_models/.DS_Store +0 -0
- ip_ckpt/lyra_tran/sdxl_models/image_encoder/config.json +81 -0
- ip_ckpt/lyra_tran/sdxl_models/image_encoder/model.safetensors +3 -0
- ip_ckpt/lyra_tran/sdxl_models/image_encoder/pytorch_model.bin +3 -0
- ip_ckpt/lyra_tran/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin +3 -0
- sdxl-vae-fp16-fix/config.json +32 -0
- sdxl-vae-fp16-fix/diffusion_pytorch_model.bin +3 -0
- sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors +3 -0
- sdxl-vae-fp16-fix/sdxl.vae.safetensors +3 -0
- sdxl-vae-fp16-fix/sdxl_vae.safetensors +3 -0
- sdxl_unet/config.json +69 -0
ip_ckpt/README.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: apache-2.0
|
| 3 |
+
---
|
| 4 |
+
<div align="center">
|
| 5 |
+
|
| 6 |
+
<h1> lyraDiff: An Out-of-the-box Acceleration Engine for Diffusion and DiT Models</h1>
|
| 7 |
+
|
| 8 |
+
</div>
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
`lyraDiff` introduces a **recompilation-free** inference engine for Diffusion and DiT models, achieving **state-of-the-art speed**, **extensive model support**, and **pixel-level image consistency**.
|
| 12 |
+
|
| 13 |
+
## Highlights
|
| 14 |
+
- **State-of-the-art Inference Speed**: `lyraDiff` utilizes multiple techniques to achieve up to **6.1x** speedup of the model inference, including **Quantization**, **Fused GEMM Kernels**, **Flash Attention**, and **NHWC & Fused GroupNorm**.
|
| 15 |
+
- **Memory Efficiency**: `lyraDiff` utilizes buffer-based DRAM reuse strategy and multiple types of quantizations (FP8/INT8/INT4) to save **10-40%** of DRAM usage.
|
| 16 |
+
- **Extensive Model Support**: `lyraDiff` supports a wide range of top Generative/SR models such as **SD1.5, SDXL, FLUX, S3Diff, etc.**, and those most commonly used plugins such as **LoRA, ControlNet and Ip-Adapter**.
|
| 17 |
+
- **Zero Compilation Deployment**: Unlike **TensorRT** or **AITemplate**, which takes minutes to compile, `lyraDiff` eliminates runtime recompilation overhead even with model inputs of dynamic shapes.
|
| 18 |
+
- **Image Gen Consistency**: The outputs of `lyraDiff` are aligned with the ones of [HF diffusers](https://github.com/huggingface/diffusers) at the pixel level, even under LoRA switch in quantization mode.
|
| 19 |
+
- **Fast Plugin Hot-swap**: `lyraDiff` provides **Super Fast Model Hot-swap for ControlNet and LoRA** which can hugely benefit a real-time image gen service.
|
| 20 |
+
|
| 21 |
+
## Usage
|
| 22 |
+
|
| 23 |
+
`lyraDiff-IP-Adapters` is converted from the standard [IP-Adapter](https://huggingface.co/h94/IP-Adapter) weights using this [script](https://github.com/TMElyralab/lyraDiff/blob/main/lyradiff/convert_model_scripts/convert_ipadapter.py) to be compatible with [lyraDiff](https://github.com/TMElyralab/lyraDiff), and contains both SD1.5 and SDXL versions of the converted IP-Adapter.
|
| 24 |
+
|
| 25 |
+
We provide a reference implementation of lyraDiff version of SD1.5/SDXL, as well as sampling code, in a dedicated [github repository](https://github.com/TMElyralab/lyraDiff).
|
| 26 |
+
|
| 27 |
+
### Example
|
| 28 |
+
We provide a minimal [script](https://github.com/TMElyralab/lyraDiff/blob/main/examples/SDXL/ipadapter_demo.py) for running SDXL models + IP-Adapter with lyraDiff as follows:
|
| 29 |
+
|
| 30 |
+
```python
|
| 31 |
+
import torch
|
| 32 |
+
import time
|
| 33 |
+
import sys, os
|
| 34 |
+
from diffusers import StableDiffusionXLPipeline
|
| 35 |
+
from lyradiff.lyradiff_model.module.lyradiff_ip_adapter import LyraIPAdapter
|
| 36 |
+
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
|
| 37 |
+
from lyradiff.lyradiff_model.lyradiff_unet_model import LyraDiffUNet2DConditionModel
|
| 38 |
+
from lyradiff.lyradiff_model.lyradiff_vae_model import LyraDiffVaeModel
|
| 39 |
+
from diffusers import EulerAncestralDiscreteScheduler
|
| 40 |
+
from PIL import Image
|
| 41 |
+
from diffusers.utils import load_image
|
| 42 |
+
import GPUtil
|
| 43 |
+
|
| 44 |
+
model_path = "/path/to/sdxl/model/"
|
| 45 |
+
vae_model_path = "/path/to/sdxl/sdxl-vae-fp16-fix"
|
| 46 |
+
|
| 47 |
+
text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder").to(torch.float16).to(torch.device("cuda"))
|
| 48 |
+
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(model_path, subfolder="text_encoder_2").to(torch.float16).to(torch.device("cuda"))
|
| 49 |
+
tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
|
| 50 |
+
tokenizer_2 = CLIPTokenizer.from_pretrained( model_path, subfolder="tokenizer_2")
|
| 51 |
+
|
| 52 |
+
unet = LyraDiffUNet2DConditionModel(is_sdxl=True)
|
| 53 |
+
vae = LyraDiffVaeModel(scaling_factor=0.13025, is_upcast=False)
|
| 54 |
+
|
| 55 |
+
unet.load_from_diffusers_model(os.path.join(model_path, "unet"))
|
| 56 |
+
vae.load_from_diffusers_model(vae_model_path)
|
| 57 |
+
|
| 58 |
+
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_path, subfolder="scheduler", timestep_spacing="linspace")
|
| 59 |
+
|
| 60 |
+
pipe = StableDiffusionXLPipeline(
|
| 61 |
+
vae=vae,
|
| 62 |
+
unet=unet,
|
| 63 |
+
text_encoder=text_encoder,
|
| 64 |
+
text_encoder_2=text_encoder_2,
|
| 65 |
+
tokenizer=tokenizer,
|
| 66 |
+
tokenizer_2=tokenizer_2,
|
| 67 |
+
scheduler=scheduler
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
ip_ckpt = "/path/to/sdxl/ip_ckpt/ip-adapter-plus_sdxl_vit-h.bin"
|
| 71 |
+
image_encoder_path = "/path/to/sdxl/ip_ckpt/image_encoder"
|
| 72 |
+
|
| 73 |
+
# Create LyraIPAdapter
|
| 74 |
+
ip_adapter = LyraIPAdapter(unet_model=unet.model, sdxl=True, device=torch.device("cuda"), ip_ckpt=ip_ckpt, ip_plus=True, image_encoder_path=image_encoder_path, num_ip_tokens=16, ip_projection_dim=1024)
|
| 75 |
+
|
| 76 |
+
# load ip_adapter image
|
| 77 |
+
ip_image = load_image("https://cdn-uploads.huggingface.co/production/uploads/6461b412846a6c8c8305319d/8U6yNHTPLaOC3gIWJZWGL.png")
|
| 78 |
+
ip_scale = 0.5
|
| 79 |
+
|
| 80 |
+
# get ip image embedding and pass it to the pipeline
|
| 81 |
+
ip_image_embedding = [ip_adapter.get_image_embeds_lyradiff(ip_image)['ip_hidden_states']]
|
| 82 |
+
# Set the IP-Adapter scale on the UNet model object directly, since ip_adapter_scale cannot be set through the diffusers pipeline
|
| 83 |
+
unet.set_ip_adapter_scale(ip_scale)
|
| 84 |
+
|
| 85 |
+
for i in range(3):
|
| 86 |
+
generator = torch.Generator("cuda").manual_seed(123)
|
| 87 |
+
start = time.perf_counter()
|
| 88 |
+
images = pipe(prompt="a beautiful girl, cartoon style",
|
| 89 |
+
height=1024,
|
| 90 |
+
width=1024,
|
| 91 |
+
num_inference_steps=20,
|
| 92 |
+
num_images_per_prompt=1,
|
| 93 |
+
guidance_scale=7.5,
|
| 94 |
+
negative_prompt="NSFW",
|
| 95 |
+
generator=torch.Generator("cuda").manual_seed(123),
|
| 96 |
+
ip_adapter_image_embeds=ip_image_embedding
|
| 97 |
+
)[0]
|
| 98 |
+
images[0].save(f"sdxl_ip_{i}.png")
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
## Citation
|
| 103 |
+
``` bibtex
|
| 104 |
+
@Misc{lyraDiff_2025,
|
| 105 |
+
author = {Kangjian Wu and Zhengtao Wang and Yibo Lu and Haoxiong Su and Sa Xiao and Qiwen Mao and Mian Peng and Bin Wu and Wenjiang Zhou},
|
| 106 |
+
title = {lyraDiff: Accelerating Diffusion Models with best flexibility},
|
| 107 |
+
howpublished = {\url{https://github.com/TMElyralab/lyraDiff}},
|
| 108 |
+
year = {2025}
|
| 109 |
+
}
|
ip_ckpt/lyra_tran/models/image_encoder/config.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_name_or_path": "./image_encoder",
|
| 3 |
+
"architectures": [
|
| 4 |
+
"CLIPVisionModelWithProjection"
|
| 5 |
+
],
|
| 6 |
+
"attention_dropout": 0.0,
|
| 7 |
+
"dropout": 0.0,
|
| 8 |
+
"hidden_act": "gelu",
|
| 9 |
+
"hidden_size": 1280,
|
| 10 |
+
"image_size": 224,
|
| 11 |
+
"initializer_factor": 1.0,
|
| 12 |
+
"initializer_range": 0.02,
|
| 13 |
+
"intermediate_size": 5120,
|
| 14 |
+
"layer_norm_eps": 1e-05,
|
| 15 |
+
"model_type": "clip_vision_model",
|
| 16 |
+
"num_attention_heads": 16,
|
| 17 |
+
"num_channels": 3,
|
| 18 |
+
"num_hidden_layers": 32,
|
| 19 |
+
"patch_size": 14,
|
| 20 |
+
"projection_dim": 1024,
|
| 21 |
+
"torch_dtype": "float16",
|
| 22 |
+
"transformers_version": "4.28.0.dev0"
|
| 23 |
+
}
|
ip_ckpt/lyra_tran/models/image_encoder/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6ca9667da1ca9e0b0f75e46bb030f7e011f44f86cbfb8d5a36590fcd7507b030
|
| 3 |
+
size 2528373448
|
ip_ckpt/lyra_tran/models/image_encoder/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3d3ec1e66737f77a4f3bc2df3c52eacefc69ce7825e2784183b1d4e9877d9193
|
| 3 |
+
size 2528481905
|
ip_ckpt/lyra_tran/sdxl_models/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
ip_ckpt/lyra_tran/sdxl_models/image_encoder/config.json
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"CLIPVisionModelWithProjection"
|
| 4 |
+
],
|
| 5 |
+
"_name_or_path": "",
|
| 6 |
+
"add_cross_attention": false,
|
| 7 |
+
"architectures": null,
|
| 8 |
+
"attention_dropout": 0.0,
|
| 9 |
+
"bad_words_ids": null,
|
| 10 |
+
"begin_suppress_tokens": null,
|
| 11 |
+
"bos_token_id": null,
|
| 12 |
+
"chunk_size_feed_forward": 0,
|
| 13 |
+
"cross_attention_hidden_size": null,
|
| 14 |
+
"decoder_start_token_id": null,
|
| 15 |
+
"diversity_penalty": 0.0,
|
| 16 |
+
"do_sample": false,
|
| 17 |
+
"dropout": 0.0,
|
| 18 |
+
"early_stopping": false,
|
| 19 |
+
"encoder_no_repeat_ngram_size": 0,
|
| 20 |
+
"eos_token_id": null,
|
| 21 |
+
"exponential_decay_length_penalty": null,
|
| 22 |
+
"finetuning_task": null,
|
| 23 |
+
"forced_bos_token_id": null,
|
| 24 |
+
"forced_eos_token_id": null,
|
| 25 |
+
"hidden_act": "gelu",
|
| 26 |
+
"hidden_size": 1664,
|
| 27 |
+
"id2label": {
|
| 28 |
+
"0": "LABEL_0",
|
| 29 |
+
"1": "LABEL_1"
|
| 30 |
+
},
|
| 31 |
+
"image_size": 224,
|
| 32 |
+
"initializer_factor": 1.0,
|
| 33 |
+
"initializer_range": 0.02,
|
| 34 |
+
"intermediate_size": 8192,
|
| 35 |
+
"is_decoder": false,
|
| 36 |
+
"is_encoder_decoder": false,
|
| 37 |
+
"label2id": {
|
| 38 |
+
"LABEL_0": 0,
|
| 39 |
+
"LABEL_1": 1
|
| 40 |
+
},
|
| 41 |
+
"layer_norm_eps": 1e-05,
|
| 42 |
+
"length_penalty": 1.0,
|
| 43 |
+
"max_length": 20,
|
| 44 |
+
"min_length": 0,
|
| 45 |
+
"model_type": "clip_vision_model",
|
| 46 |
+
"no_repeat_ngram_size": 0,
|
| 47 |
+
"num_attention_heads": 16,
|
| 48 |
+
"num_beam_groups": 1,
|
| 49 |
+
"num_beams": 1,
|
| 50 |
+
"num_channels": 3,
|
| 51 |
+
"num_hidden_layers": 48,
|
| 52 |
+
"num_return_sequences": 1,
|
| 53 |
+
"output_attentions": false,
|
| 54 |
+
"output_hidden_states": false,
|
| 55 |
+
"output_scores": false,
|
| 56 |
+
"pad_token_id": null,
|
| 57 |
+
"patch_size": 14,
|
| 58 |
+
"prefix": null,
|
| 59 |
+
"problem_type": null,
|
| 60 |
+
"pruned_heads": {},
|
| 61 |
+
"remove_invalid_values": false,
|
| 62 |
+
"repetition_penalty": 1.0,
|
| 63 |
+
"return_dict": true,
|
| 64 |
+
"return_dict_in_generate": false,
|
| 65 |
+
"sep_token_id": null,
|
| 66 |
+
"suppress_tokens": null,
|
| 67 |
+
"task_specific_params": null,
|
| 68 |
+
"temperature": 1.0,
|
| 69 |
+
"tf_legacy_loss": false,
|
| 70 |
+
"tie_encoder_decoder": false,
|
| 71 |
+
"tie_word_embeddings": true,
|
| 72 |
+
"tokenizer_class": null,
|
| 73 |
+
"top_k": 50,
|
| 74 |
+
"top_p": 1.0,
|
| 75 |
+
"torch_dtype": null,
|
| 76 |
+
"torchscript": false,
|
| 77 |
+
"transformers_version": "4.24.0",
|
| 78 |
+
"typical_p": 1.0,
|
| 79 |
+
"use_bfloat16": false,
|
| 80 |
+
"projection_dim": 1280
|
| 81 |
+
}
|
ip_ckpt/lyra_tran/sdxl_models/image_encoder/model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:657723e09f46a7c3957df651601029f66b1748afb12b419816330f16ed45d64d
|
| 3 |
+
size 3689912664
|
ip_ckpt/lyra_tran/sdxl_models/image_encoder/pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2999562fbc02f9dc0d9c0acb7cf0970ec3a9b2a578d7d05afe82191d606d2d80
|
| 3 |
+
size 3690112753
|
ip_ckpt/lyra_tran/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ec70edb7cc8e769c9388d94eeaea3e4526352c9fae793a608782d1d8951fde90
|
| 3 |
+
size 1013454427
|
sdxl-vae-fp16-fix/config.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_class_name": "AutoencoderKL",
|
| 3 |
+
"_diffusers_version": "0.18.0.dev0",
|
| 4 |
+
"_name_or_path": ".",
|
| 5 |
+
"act_fn": "silu",
|
| 6 |
+
"block_out_channels": [
|
| 7 |
+
128,
|
| 8 |
+
256,
|
| 9 |
+
512,
|
| 10 |
+
512
|
| 11 |
+
],
|
| 12 |
+
"down_block_types": [
|
| 13 |
+
"DownEncoderBlock2D",
|
| 14 |
+
"DownEncoderBlock2D",
|
| 15 |
+
"DownEncoderBlock2D",
|
| 16 |
+
"DownEncoderBlock2D"
|
| 17 |
+
],
|
| 18 |
+
"in_channels": 3,
|
| 19 |
+
"latent_channels": 4,
|
| 20 |
+
"layers_per_block": 2,
|
| 21 |
+
"norm_num_groups": 32,
|
| 22 |
+
"out_channels": 3,
|
| 23 |
+
"sample_size": 512,
|
| 24 |
+
"scaling_factor": 0.13025,
|
| 25 |
+
"up_block_types": [
|
| 26 |
+
"UpDecoderBlock2D",
|
| 27 |
+
"UpDecoderBlock2D",
|
| 28 |
+
"UpDecoderBlock2D",
|
| 29 |
+
"UpDecoderBlock2D"
|
| 30 |
+
],
|
| 31 |
+
"force_upcast": false
|
| 32 |
+
}
|
sdxl-vae-fp16-fix/diffusion_pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:37eb3e09ae1ce3d6891ddf809ca927b618e501091142cf07fdd9cd170e3a046f
|
| 3 |
+
size 334712113
|
sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c
|
| 3 |
+
size 334643238
|
sdxl-vae-fp16-fix/sdxl.vae.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1
|
| 3 |
+
size 334641162
|
sdxl-vae-fp16-fix/sdxl_vae.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1
|
| 3 |
+
size 334641162
|
sdxl_unet/config.json
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_class_name": "UNet2DConditionModel",
|
| 3 |
+
"_diffusers_version": "0.19.0.dev0",
|
| 4 |
+
"act_fn": "silu",
|
| 5 |
+
"addition_embed_type": "text_time",
|
| 6 |
+
"addition_embed_type_num_heads": 64,
|
| 7 |
+
"addition_time_embed_dim": 256,
|
| 8 |
+
"attention_head_dim": [
|
| 9 |
+
5,
|
| 10 |
+
10,
|
| 11 |
+
20
|
| 12 |
+
],
|
| 13 |
+
"block_out_channels": [
|
| 14 |
+
320,
|
| 15 |
+
640,
|
| 16 |
+
1280
|
| 17 |
+
],
|
| 18 |
+
"center_input_sample": false,
|
| 19 |
+
"class_embed_type": null,
|
| 20 |
+
"class_embeddings_concat": false,
|
| 21 |
+
"conv_in_kernel": 3,
|
| 22 |
+
"conv_out_kernel": 3,
|
| 23 |
+
"cross_attention_dim": 2048,
|
| 24 |
+
"cross_attention_norm": null,
|
| 25 |
+
"down_block_types": [
|
| 26 |
+
"DownBlock2D",
|
| 27 |
+
"CrossAttnDownBlock2D",
|
| 28 |
+
"CrossAttnDownBlock2D"
|
| 29 |
+
],
|
| 30 |
+
"downsample_padding": 1,
|
| 31 |
+
"dual_cross_attention": false,
|
| 32 |
+
"encoder_hid_dim": null,
|
| 33 |
+
"encoder_hid_dim_type": null,
|
| 34 |
+
"flip_sin_to_cos": true,
|
| 35 |
+
"freq_shift": 0,
|
| 36 |
+
"in_channels": 4,
|
| 37 |
+
"layers_per_block": 2,
|
| 38 |
+
"mid_block_only_cross_attention": null,
|
| 39 |
+
"mid_block_scale_factor": 1,
|
| 40 |
+
"mid_block_type": "UNetMidBlock2DCrossAttn",
|
| 41 |
+
"norm_eps": 1e-05,
|
| 42 |
+
"norm_num_groups": 32,
|
| 43 |
+
"num_attention_heads": null,
|
| 44 |
+
"num_class_embeds": null,
|
| 45 |
+
"only_cross_attention": false,
|
| 46 |
+
"out_channels": 4,
|
| 47 |
+
"projection_class_embeddings_input_dim": 2816,
|
| 48 |
+
"resnet_out_scale_factor": 1.0,
|
| 49 |
+
"resnet_skip_time_act": false,
|
| 50 |
+
"resnet_time_scale_shift": "default",
|
| 51 |
+
"sample_size": 128,
|
| 52 |
+
"time_cond_proj_dim": null,
|
| 53 |
+
"time_embedding_act_fn": null,
|
| 54 |
+
"time_embedding_dim": null,
|
| 55 |
+
"time_embedding_type": "positional",
|
| 56 |
+
"timestep_post_act": null,
|
| 57 |
+
"transformer_layers_per_block": [
|
| 58 |
+
1,
|
| 59 |
+
2,
|
| 60 |
+
10
|
| 61 |
+
],
|
| 62 |
+
"up_block_types": [
|
| 63 |
+
"CrossAttnUpBlock2D",
|
| 64 |
+
"CrossAttnUpBlock2D",
|
| 65 |
+
"UpBlock2D"
|
| 66 |
+
],
|
| 67 |
+
"upcast_attention": null,
|
| 68 |
+
"use_linear_projection": true
|
| 69 |
+
}
|