diff --git a/.cache/huggingface/.gitignore b/.cache/huggingface/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f59ec20aabf5842d237244ece8c81ab184faeac1 --- /dev/null +++ b/.cache/huggingface/.gitignore @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/.cache/huggingface/download/.gitattributes.metadata b/.cache/huggingface/download/.gitattributes.metadata new file mode 100644 index 0000000000000000000000000000000000000000..537c99405f66cbfd706600e80da4a9f868890de3 --- /dev/null +++ b/.cache/huggingface/download/.gitattributes.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +7cde9d254d057f6b4bb9ef30b214e1c60fc64d57 +1743561839.6836596 diff --git a/.cache/huggingface/download/README.md.metadata b/.cache/huggingface/download/README.md.metadata new file mode 100644 index 0000000000000000000000000000000000000000..234327a92b4ab14c95733e6bf8f706d2669d85ad --- /dev/null +++ b/.cache/huggingface/download/README.md.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +87224a2de7792d8045b6ebf0338683b9f3645fcb +1743561839.665659 diff --git a/.cache/huggingface/download/assets/comp_effic.png.metadata b/.cache/huggingface/download/assets/comp_effic.png.metadata new file mode 100644 index 0000000000000000000000000000000000000000..23e23cd318dc25883b53344256aff307f7faf352 --- /dev/null +++ b/.cache/huggingface/download/assets/comp_effic.png.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +b0e225caffb4b31295ad150f95ee852e4c3dde4a00ac8f79a2ff500f2ce26b8d +1743561839.7486591 diff --git a/.cache/huggingface/download/assets/data_for_diff_stage.jpg.metadata b/.cache/huggingface/download/assets/data_for_diff_stage.jpg.metadata new file mode 100644 index 0000000000000000000000000000000000000000..d2ef6ef3965ff2f1aec84ef8a70acb48998c5a8b --- /dev/null +++ b/.cache/huggingface/download/assets/data_for_diff_stage.jpg.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +59aec08409f2d46b0e640e4e120dc7cca52c08c3de56d026602dbcff1ebf241a +1743561839.7386565 diff --git a/.cache/huggingface/download/assets/i2v_res.png.metadata b/.cache/huggingface/download/assets/i2v_res.png.metadata new file mode 100644 index 0000000000000000000000000000000000000000..9f455f9d2d635ae8c3071b1ad30a814582735b9f --- /dev/null +++ b/.cache/huggingface/download/assets/i2v_res.png.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +6823b3206d8d0cb18d3b5b949dec1217f1178109ba11f14e977b67e1f7b8a248 +1743561839.834657 diff --git a/.cache/huggingface/download/assets/logo.png.metadata b/.cache/huggingface/download/assets/logo.png.metadata new file mode 100644 index 0000000000000000000000000000000000000000..900d8421fe2099ea3ff3eb9b143a0dda6f397c37 --- /dev/null +++ b/.cache/huggingface/download/assets/logo.png.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +96cddc0f667293436d0b9f92a299b6346b65b231d38ee49719a33d46c91fe1e3 +1743561839.6816583 diff --git a/.cache/huggingface/download/assets/t2v_res.jpg.metadata b/.cache/huggingface/download/assets/t2v_res.jpg.metadata new file mode 100644 index 0000000000000000000000000000000000000000..d4d48fee4bd7f224f678ef459c971e337f89c3fc --- /dev/null +++ b/.cache/huggingface/download/assets/t2v_res.jpg.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +91db579092446be2a834bc67721a8e4346936f38c4edb912f459ca3e10f8f439 +1743561839.5706127 diff --git a/.cache/huggingface/download/assets/vben_1.3b_vs_sota.png.metadata 
b/.cache/huggingface/download/assets/vben_1.3b_vs_sota.png.metadata new file mode 100644 index 0000000000000000000000000000000000000000..c0f88e36eba299eee5d2c56b7bca637a7d417112 --- /dev/null +++ b/.cache/huggingface/download/assets/vben_1.3b_vs_sota.png.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +b7705db79f2e1428ec7a1e6fff8c4fbde062fb95bb233516ddbd04b20007c845 +1743561839.708659 diff --git a/.cache/huggingface/download/assets/vben_vs_sota.png.metadata b/.cache/huggingface/download/assets/vben_vs_sota.png.metadata new file mode 100644 index 0000000000000000000000000000000000000000..76db8ac037d304c0bc375ba59518880f9cce67f9 --- /dev/null +++ b/.cache/huggingface/download/assets/vben_vs_sota.png.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +9a0e86ca85046d2675f97984b88b6e74df07bba8a62a31ab8a1aef50d4eda44e +1743561840.610448 diff --git a/.cache/huggingface/download/assets/video_dit_arch.jpg.metadata b/.cache/huggingface/download/assets/video_dit_arch.jpg.metadata new file mode 100644 index 0000000000000000000000000000000000000000..ec950e98e1a93fc26474e3bb59de192a85e26d11 --- /dev/null +++ b/.cache/huggingface/download/assets/video_dit_arch.jpg.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +195dceec6570289d8b01cc51d2e28a7786216f19de55b23978a52610d1646a66 +1743561840.2780747 diff --git a/.cache/huggingface/download/assets/video_vae_res.jpg.metadata b/.cache/huggingface/download/assets/video_vae_res.jpg.metadata new file mode 100644 index 0000000000000000000000000000000000000000..3061815937793df4db69c862bc86046e2796fdb2 --- /dev/null +++ b/.cache/huggingface/download/assets/video_vae_res.jpg.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +d8f9e7f7353848056a615c8ef35ab86ec22976bb46cb27405008b4089701945c +1743561840.2000742 diff --git a/.cache/huggingface/download/examples/i2v_input.JPG.metadata b/.cache/huggingface/download/examples/i2v_input.JPG.metadata new file mode 100644 index 0000000000000000000000000000000000000000..b3a73d97325be1c8e7b50c3ece275100900b0f93 --- /dev/null +++ b/.cache/huggingface/download/examples/i2v_input.JPG.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +077e3d965090c9028c69c00931675f42e1acc815c6eb450ab291b3b72d211a8e +1743561840.2440746 diff --git a/.cache/huggingface/download/image_encoder/config.json.metadata b/.cache/huggingface/download/image_encoder/config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..2daddfcd5bfff72cbdc53f93e5f40bbf8baf8b0a --- /dev/null +++ b/.cache/huggingface/download/image_encoder/config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +3be9aa15022880cec03c998d71d0b48ffeff18e2 +1743561840.1470757 diff --git a/.cache/huggingface/download/image_encoder/model.safetensors.metadata b/.cache/huggingface/download/image_encoder/model.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..9714b9419e8be78cf1ace9d669ca6eb21fc44d29 --- /dev/null +++ b/.cache/huggingface/download/image_encoder/model.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +f5c0bbf4f43f319a80335387d371742333807589c5bb4130a1a2a5d4abf6bf72 +1743562092.2149613 diff --git a/.cache/huggingface/download/image_processor/preprocessor_config.json.metadata b/.cache/huggingface/download/image_processor/preprocessor_config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..0611dedb0929fcc3d301be28af9ec3341b4c7682 --- /dev/null +++ 
b/.cache/huggingface/download/image_processor/preprocessor_config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +ae07b4c27c8e8d50505ee232210ef38d1e1cb2a2 +1743561840.2320752 diff --git a/.cache/huggingface/download/model_index.json.metadata b/.cache/huggingface/download/model_index.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..1a58a32fd5072358ef482a4d96cc56a4d448e58a --- /dev/null +++ b/.cache/huggingface/download/model_index.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +b5e23b8a97ed52fa7fef8bdfa5bda52152f74936 +1743561841.282866 diff --git a/.cache/huggingface/download/scheduler/scheduler_config.json.metadata b/.cache/huggingface/download/scheduler/scheduler_config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..dc26368e563a55bcb6349a899896373d1579685a --- /dev/null +++ b/.cache/huggingface/download/scheduler/scheduler_config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +68954ef5992ba98981b93175a67460852fefae00 +1743561840.6194732 diff --git a/.cache/huggingface/download/text_encoder/config.json.metadata b/.cache/huggingface/download/text_encoder/config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..136510aeb2b8496ffa8d56e02a240b45dbb677fa --- /dev/null +++ b/.cache/huggingface/download/text_encoder/config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +2fd01c57dddd8ae386d518c69c087c8ba8c73804 +1743561840.7427285 diff --git a/.cache/huggingface/download/text_encoder/model-00001-of-00005.safetensors.metadata b/.cache/huggingface/download/text_encoder/model-00001-of-00005.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..43c9485311c4b531339dfa15d1c4588eb7d9ced2 --- /dev/null +++ b/.cache/huggingface/download/text_encoder/model-00001-of-00005.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99 +1743562578.6698806 diff --git a/.cache/huggingface/download/text_encoder/model-00002-of-00005.safetensors.metadata b/.cache/huggingface/download/text_encoder/model-00002-of-00005.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..445a960905b352e3c3b4ed615887c2f034aa5cd5 --- /dev/null +++ b/.cache/huggingface/download/text_encoder/model-00002-of-00005.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +481c7b2b39771c44df6dd8d13ee12ed072d731b4a650bd092885d4d52db229ad +1743563346.2700834 diff --git a/.cache/huggingface/download/text_encoder/model-00003-of-00005.safetensors.metadata b/.cache/huggingface/download/text_encoder/model-00003-of-00005.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..4c36aef65323eb0f49eb01e8bd1e29f04754ae64 --- /dev/null +++ b/.cache/huggingface/download/text_encoder/model-00003-of-00005.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +f93148bcc04052a169e1e49bfcf6125df6cf9bf243cb9c627da75266cf8e35c3 +1743562960.3901935 diff --git a/.cache/huggingface/download/text_encoder/model-00004-of-00005.safetensors.metadata b/.cache/huggingface/download/text_encoder/model-00004-of-00005.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..b5a8a65b0b1e5294cc4284c89e29cfc3cb04710a --- /dev/null +++ 
b/.cache/huggingface/download/text_encoder/model-00004-of-00005.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +a451792c739c05bca4606190cc2dd16731411bac03b4cf6aacc5767321f857c9 +1743562619.1815734 diff --git a/.cache/huggingface/download/text_encoder/model-00005-of-00005.safetensors.metadata b/.cache/huggingface/download/text_encoder/model-00005-of-00005.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..c41d4bcf9f5cbbbc9c9c815632a3b99c71c16b8b --- /dev/null +++ b/.cache/huggingface/download/text_encoder/model-00005-of-00005.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +7e76e18d224531b8197a46231cb53daf7f2f6ca707130252becf933026ac4eea +1743561968.839003 diff --git a/.cache/huggingface/download/text_encoder/model.safetensors.index.json.metadata b/.cache/huggingface/download/text_encoder/model.safetensors.index.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..4a914870c48ece4b5a0b0b7a94228b56ed9827fe --- /dev/null +++ b/.cache/huggingface/download/text_encoder/model.safetensors.index.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +60ece61b46ecb3e6a5b705ea304bc97535317c2a +1743561841.5554132 diff --git a/.cache/huggingface/download/tokenizer/special_tokens_map.json.metadata b/.cache/huggingface/download/tokenizer/special_tokens_map.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..3abd0fec0a349ac9437ff63c18d2b872675e94b3 --- /dev/null +++ b/.cache/huggingface/download/tokenizer/special_tokens_map.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +2ed25bf989a28d20b5d4b5822fbc24666d12a6f7 +1743561843.4675965 diff --git a/.cache/huggingface/download/tokenizer/spiece.model.metadata b/.cache/huggingface/download/tokenizer/spiece.model.metadata new file mode 100644 index 0000000000000000000000000000000000000000..313034b2e05885eb6791b3ecd695775029f8334a --- /dev/null +++ b/.cache/huggingface/download/tokenizer/spiece.model.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +e3909a67b780650b35cf529ac782ad2b6b26e6d1f849d3fbb6a872905f452458 +1743561843.132565 diff --git a/.cache/huggingface/download/tokenizer/tokenizer.json.metadata b/.cache/huggingface/download/tokenizer/tokenizer.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..a96c281d9b0f704bb553e9d60b42c633ea22b23f --- /dev/null +++ b/.cache/huggingface/download/tokenizer/tokenizer.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +20a46ac256746594ed7e1e3ef733b83fbc5a6f0922aa7480eda961743de080ef +1743561849.196448 diff --git a/.cache/huggingface/download/tokenizer/tokenizer_config.json.metadata b/.cache/huggingface/download/tokenizer/tokenizer_config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..baabb48505850fdff909f4456e87f8c8f35ffdfb --- /dev/null +++ b/.cache/huggingface/download/tokenizer/tokenizer_config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +09d434f9457238f697f4c208aab47f58caa15bfe +1743561843.9789824 diff --git a/.cache/huggingface/download/transformer/config.json.metadata b/.cache/huggingface/download/transformer/config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..648343187bd896c5783c34ec75ca7cca0180cb1e --- /dev/null +++ b/.cache/huggingface/download/transformer/config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 
+998a7ae19a6151f25b78801bfde3ac1c2372d086 +1743561844.7242002 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00001-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00001-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..6ad867c390456f3826f4201faa4b8df318406172 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00001-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +578fb4865a81befa52e7cc6bbf4e38bf889b060da48469d6afc33293cd320aee +1743562050.4216878 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00002-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00002-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..0f94df531743b16541f4754df212c9a77eb8d181 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00002-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +c6de6b7274a0a5df23781946380090186b118db8ebc2b399fb3b0c184e34ec51 +1743563308.1231756 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00003-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00003-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..a58ae38bca8e0877a4467b1e52834ca61ca6ad3a --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00003-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +40cd68d9cbcedeeecc8c0d7097d8d3661abd94a561d845a83467f51dc2c5d510 +1743563220.6839123 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00004-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00004-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..5b037a2e220d9de9250fe14edd7ac59c566ce4bc --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00004-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +8ac9bda9384b13089db4c7e8aa0d6717f25cc8b359e7c22de972928affbde53f +1743562275.772391 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00005-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00005-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..47c373f234dbaa1ea287740c494042d1b7b3f5ed --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00005-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +15c496abbeca6af95c75b081f1b3c79c89c955377272ce9301c57df202a56682 +1743562290.9464 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00006-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00006-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..357540c4e2b273432bb83ccabc67ba5256bc10d8 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00006-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 
+7183288b1dfd63cc5b3df83dc7143baa3d7d7e2af28dd9483bfb11302e3485b6 +1743562486.9983783 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00007-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00007-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..f113fd2d947cb3e513ecd2aa8560c479a973360b --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00007-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +6f29d03e9c754a1fb3cd55b36e90eec4e766ae8bf6f12914590cf86730cf9cd4 +1743562920.133621 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00008-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00008-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..d6a02144261798fcf9b2fc3c2e95bbfb02317b42 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00008-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +8664bbf773bc62d6b1e91fbba7e65a67b46049b78e6cee3e1d88ed119ede144a +1743563098.084907 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00009-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00009-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..74b0d589995c0da33707c4ab283c1a0513d8b41c --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00009-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +af18a5aff3479457318060670067874482fa048e7a644e00b37262bde2c0085a +1743562796.9459107 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00010-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00010-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..39056813910077b384fcde6e085c5446b6fa7a11 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00010-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +2c0bff9b932684a39823b1fc3d3d83a2fe7b33ff679ee0a7b98c60e102b6fa45 +1743562860.6050017 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00011-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00011-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..9d06cfb80cda78b24bce4045e3f5218f65162f97 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00011-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +6ae788cff8de05f565042ccec34550e6f4e2629435718aac5eb993603effa37e +1743563203.2706444 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00012-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00012-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..af3b95ef87ee60df23debf41ed3971f7532fcc73 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00012-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 
+a55a84afcc67fe716ac1e302eb690e28b0b96896cbee2c7533c6fc5add63efeb +1743563375.4128845 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00013-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00013-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..7da99dc49aa10bf2fa451bf4656a0701dbbb8121 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00013-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +6eaea61781808c98dac0b9b98f7e8e5ee8d1d84180b206961594760b910c4c43 +1743563142.9103942 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model-00014-of-00014.safetensors.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00014-of-00014.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..33c1b5c97bf90a0bfa6fca81a62fe6dbbc87d41b --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model-00014-of-00014.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +76c360055f102fe88c0132ef325e4dbb193955a7cfbc92a15a2c0c337ef5d85c +1743563233.0308325 diff --git a/.cache/huggingface/download/transformer/diffusion_pytorch_model.safetensors.index.json.metadata b/.cache/huggingface/download/transformer/diffusion_pytorch_model.safetensors.index.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..84688085c1c813875df72a2f4663654bac504b46 --- /dev/null +++ b/.cache/huggingface/download/transformer/diffusion_pytorch_model.safetensors.index.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +8c634524e998b6be92d5ce1983030bd37e87718d +1743563098.9606261 diff --git a/.cache/huggingface/download/vae/config.json.metadata b/.cache/huggingface/download/vae/config.json.metadata new file mode 100644 index 0000000000000000000000000000000000000000..4d3f0565e4305b240c95a0dd705c378ecc80bbf7 --- /dev/null +++ b/.cache/huggingface/download/vae/config.json.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +fe988ee53511225fb2fd0a01004d6e19524df75f +1743563099.3789775 diff --git a/.cache/huggingface/download/vae/diffusion_pytorch_model.safetensors.metadata b/.cache/huggingface/download/vae/diffusion_pytorch_model.safetensors.metadata new file mode 100644 index 0000000000000000000000000000000000000000..293df073d9be52e8ff1e8e80ad1c626ec5b2320c --- /dev/null +++ b/.cache/huggingface/download/vae/diffusion_pytorch_model.safetensors.metadata @@ -0,0 +1,3 @@ +482bed6c298d3d66fd22c51337af0ccf26264926 +d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793 +1743563131.555712 diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..7cde9d254d057f6b4bb9ef30b214e1c60fc64d57 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text +assets/comp_effic.png filter=lfs diff=lfs merge=lfs -text +assets/data_for_diff_stage.jpg filter=lfs diff=lfs merge=lfs -text +assets/i2v_res.png filter=lfs diff=lfs merge=lfs -text +assets/logo.png filter=lfs diff=lfs merge=lfs -text +assets/t2v_res.jpg filter=lfs diff=lfs merge=lfs -text +assets/vben_1.3b_vs_sota.png filter=lfs diff=lfs 
merge=lfs -text +assets/vben_vs_sota.png filter=lfs diff=lfs merge=lfs -text +assets/video_dit_arch.jpg filter=lfs diff=lfs merge=lfs -text +assets/video_vae_res.jpg filter=lfs diff=lfs merge=lfs -text +examples/i2v_input.JPG filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index 7b95401dc46245ac339fc25059d4a56d90b4cde5..87224a2de7792d8045b6ebf0338683b9f3645fcb 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,311 @@ ---- -license: apache-2.0 ---- +--- +license: apache-2.0 +language: +- en +- zh +pipeline_tag: image-to-video +library_name: diffusers +tags: +- video +- video generation +--- +# Wan2.1 + +

+![Wan2.1 logo](assets/logo.png) +
πŸ’œ Wan | πŸ–₯️ GitHub | πŸ€— Hugging Face | πŸ€– ModelScope | πŸ“‘ Paper (Coming soon) | πŸ“‘ Blog | πŸ’¬ WeChat Group | πŸ“– Discord
+ +----- + +[**Wan: Open and Advanced Large-Scale Video Generative Models**]() + +In this repository, we present **Wan2.1**, a comprehensive and open suite of video foundation models that pushes the boundaries of video generation. **Wan2.1** offers these key features: +- πŸ‘ **SOTA Performance**: **Wan2.1** consistently outperforms existing open-source models and state-of-the-art commercial solutions across multiple benchmarks. +- πŸ‘ **Supports Consumer-grade GPUs**: The T2V-1.3B model requires only 8.19 GB VRAM, making it compatible with almost all consumer-grade GPUs. It can generate a 5-second 480P video on an RTX 4090 in about 4 minutes (without optimization techniques like quantization). Its performance is even comparable to some closed-source models. +- πŸ‘ **Multiple Tasks**: **Wan2.1** excels in Text-to-Video, Image-to-Video, Video Editing, Text-to-Image, and Video-to-Audio, advancing the field of video generation. +- πŸ‘ **Visual Text Generation**: **Wan2.1** is the first video model capable of generating both Chinese and English text, featuring robust text generation that enhances its practical applications. +- πŸ‘ **Powerful Video VAE**: **Wan-VAE** delivers exceptional efficiency and performance, encoding and decoding 1080P videos of any length while preserving temporal information, making it an ideal foundation for video and image generation. + + +This repository contains our I2V-14B model, which is capable of generating 720P high-definition videos. After thousands of rounds of human evaluations, this model has outperformed both closed-source and open-source alternatives, achieving state-of-the-art performance. + + +## Video Demos + +

+ +
+ +## πŸ”₯ Latest News!! + +* Feb 25, 2025: πŸ‘‹ We've released the inference code and weights of Wan2.1. + + +## πŸ“‘ Todo List +- Wan2.1 Text-to-Video + - [x] Multi-GPU Inference code of the 14B and 1.3B models + - [x] Checkpoints of the 14B and 1.3B models + - [x] Gradio demo + - [x] Diffusers integration + - [ ] ComfyUI integration +- Wan2.1 Image-to-Video + - [x] Multi-GPU Inference code of the 14B model + - [x] Checkpoints of the 14B model + - [x] Gradio demo + - [x] Diffusers integration + - [ ] ComfyUI integration + + +## Quickstart + +#### Installation +Clone the repo: +``` +git clone https://github.com/Wan-Video/Wan2.1.git +cd Wan2.1 +``` + +Install dependencies: +``` +# Ensure torch >= 2.4.0 +pip install -r requirements.txt +``` + + +#### Model Download + +| Models | Download Link | Notes | +| --------------|-------------------------------------------------------------------------------|-------------------------------| +| T2V-14B | πŸ€— [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-14B) πŸ€– [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B) | Supports both 480P and 720P +| I2V-14B-720P | πŸ€— [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P) πŸ€– [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P) | Supports 720P +| I2V-14B-480P | πŸ€— [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-480P) πŸ€– [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P) | Supports 480P +| T2V-1.3B | πŸ€— [Huggingface](https://huggingface.co/Wan-AI/Wan2.1-T2V-1.3B) πŸ€– [ModelScope](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B) | Supports 480P + +> πŸ’‘Note: The 1.3B model is capable of generating videos at 720P resolution. However, due to limited training at this resolution, the results are generally less stable compared to 480P. For optimal performance, we recommend using 480P resolution. + + +Download models using πŸ€— huggingface-cli: +``` +pip install "huggingface_hub[cli]" +huggingface-cli download Wan-AI/Wan2.1-I2V-14B-720P-Diffusers --local-dir ./Wan2.1-I2V-14B-720P-Diffusers +``` + +Download models using πŸ€– modelscope-cli: +``` +pip install modelscope +modelscope download Wan-AI/Wan2.1-I2V-14B-720P-Diffusers --local_dir ./Wan2.1-I2V-14B-720P-Diffusers +``` + +#### Run Image-to-Video Generation + +Similar to Text-to-Video, Image-to-Video is also divided into processes with and without the prompt extension step. The specific parameters and their corresponding settings are as follows: +
+| Task | 480P | 720P | Model | +|------|------|------|-------| +| i2v-14B | ❌ | βœ”οΈ | Wan2.1-I2V-14B-720P | +| i2v-14B | βœ”οΈ | ❌ | Wan2.1-I2V-14B-480P |
+ + +##### (1) Without Prompt Extension + +- Single-GPU inference +``` +python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." +``` + +> πŸ’‘For the Image-to-Video task, the `size` parameter represents the area of the generated video, with the aspect ratio following that of the original input image. + +- Multi-GPU inference using FSDP + xDiT USP + +``` +pip install "xfuser>=0.4.1" +torchrun --nproc_per_node=8 generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --dit_fsdp --t5_fsdp --ulysses_size 8 --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." +``` + +Wan can also be run directly using πŸ€— Diffusers! + +```python +import torch +import numpy as np +from diffusers import AutoencoderKLWan, WanImageToVideoPipeline +from diffusers.utils import export_to_video, load_image +from transformers import CLIPVisionModel + +# Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-14B-720P-Diffusers +model_id = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers" +image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32) +vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) +pipe = WanImageToVideoPipeline.from_pretrained(model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16) +pipe.to("cuda") + +image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg" +) +# Resize so the output area is about 720*1280 while keeping the input aspect ratio +# and keeping height/width divisible by the VAE and transformer patch factors. +max_area = 720 * 1280 +aspect_ratio = image.height / image.width +mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1] +height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value +width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value +image = image.resize((width, height)) +prompt = ( + "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in " + "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
+) +negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + +output = pipe( + image=image, prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, num_frames=81, guidance_scale=5.0 +).frames[0] +export_to_video(output, "output.mp4", fps=16) +``` + +##### (2) Using Prompt Extension + + +Run with local prompt extension using `Qwen/Qwen2.5-VL-7B-Instruct`: +``` +python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_model Qwen/Qwen2.5-VL-7B-Instruct --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." +``` + +Run with remote prompt extension using `dashscope`: +``` +DASH_API_KEY=your_key python generate.py --task i2v-14B --size 1280*720 --ckpt_dir ./Wan2.1-I2V-14B-720P --image examples/i2v_input.JPG --use_prompt_extend --prompt_extend_method 'dashscope' --prompt "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside." +``` + +##### (3) Running local Gradio + +``` +cd gradio +# if only using the 480P model in Gradio +DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P + +# if only using the 720P model in Gradio +DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_720p ./Wan2.1-I2V-14B-720P + +# if using both the 480P and 720P models in Gradio +DASH_API_KEY=your_key python i2v_14B_singleGPU.py --prompt_extend_method 'dashscope' --ckpt_dir_480p ./Wan2.1-I2V-14B-480P --ckpt_dir_720p ./Wan2.1-I2V-14B-720P +``` + + +## Manual Evaluation + +We conducted extensive manual evaluations of the Image-to-Video model's performance; the results are presented in the table below. The results clearly indicate that **Wan2.1** outperforms both closed-source and open-source models. + +![Manual evaluation results of the Image-to-Video model](assets/i2v_res.png)
+ + +## Computational Efficiency on Different GPUs + +We test the computational efficiency of different **Wan2.1** models on different GPUs in the following table. The results are presented in the format **Total time (s) / peak GPU memory (GB)**. + +![Computational efficiency of Wan2.1 models on different GPUs](assets/comp_effic.png)
+ +> The parameter settings for the tests presented in this table are as follows: +> (1) For the 1.3B model on 8 GPUs, set `--ring_size 8` and `--ulysses_size 1`; +> (2) For the 14B model on 1 GPU, use `--offload_model True`; +> (3) For the 1.3B model on a single 4090 GPU, set `--offload_model True --t5_cpu`; +> (4) For all tests, no prompt extension was applied, meaning `--use_prompt_extend` was not enabled. + +------- + +## Introduction of Wan2.1 + +**Wan2.1** is built on the mainstream diffusion transformer paradigm, achieving significant advancements in generative capabilities through a series of innovations. These include our novel spatio-temporal variational autoencoder (VAE), scalable training strategies, large-scale data construction, and automated evaluation metrics. Collectively, these contributions enhance the model’s performance and versatility. + + +##### (1) 3D Variational Autoencoders +We propose a novel 3D causal VAE architecture, termed **Wan-VAE**, specifically designed for video generation. By combining multiple strategies, we improve spatio-temporal compression, reduce memory usage, and ensure temporal causality. **Wan-VAE** demonstrates significant advantages in performance efficiency compared to other open-source VAEs. Furthermore, our **Wan-VAE** can encode and decode unlimited-length 1080P videos without losing historical temporal information, making it particularly well-suited for video generation tasks. + +![Wan-VAE performance comparison with other video VAEs](assets/video_vae_res.jpg)
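The temporal causality mentioned above can be pictured with a causal 3D convolution: along the time axis, padding is applied only toward the past, so each encoded frame depends solely on the current and earlier frames. Below is a minimal PyTorch sketch of that generic building block; it illustrates the technique only and is not the actual Wan-VAE implementation (the class name and layout are assumptions).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CausalConv3d(nn.Module):
    """3D convolution that is causal in time: output frame t only sees inputs <= t."""

    def __init__(self, in_ch: int, out_ch: int, kernel=(3, 3, 3)):
        super().__init__()
        kt, kh, kw = kernel
        self.past_pad = kt - 1  # all temporal padding goes on the "past" side
        # spatial dims get ordinary same-padding; temporal padding is done manually
        self.conv = nn.Conv3d(in_ch, out_ch, kernel, padding=(0, kh // 2, kw // 2))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, channels, time, height, width)
        # F.pad order for 5D input is (W_left, W_right, H_left, H_right, T_left, T_right)
        x = F.pad(x, (0, 0, 0, 0, self.past_pad, 0))
        return self.conv(x)

video = torch.randn(1, 3, 17, 64, 64)   # 17 RGB frames at 64x64
out = CausalConv3d(3, 16)(video)
print(out.shape)                         # torch.Size([1, 16, 17, 64, 64])
```

Because no layer ever looks at future frames, a long video can be processed chunk by chunk while caching only past activations, which is consistent with the unlimited-length encoding described above.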
+ + +##### (2) Video Diffusion DiT + +**Wan2.1** is designed using the Flow Matching framework within the paradigm of mainstream Diffusion Transformers. Our model's architecture uses the T5 Encoder to encode multilingual text input, with cross-attention in each transformer block used to embed the text into the model structure. Additionally, we employ an MLP with a Linear layer and a SiLU layer to process the input time embeddings and predict six modulation parameters individually. This MLP is shared across all transformer blocks, with each block learning a distinct set of biases. Our experimental findings reveal a significant performance improvement with this approach at the same parameter scale. + +![Wan2.1 video diffusion DiT architecture](assets/video_dit_arch.jpg)
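To make the shared time-embedding MLP and per-block biases concrete, here is a minimal PyTorch sketch. The 1.3B sizes are taken from the table below; the SiLU-then-Linear ordering and the shift/scale/gate reading of the six modulation parameters are illustrative assumptions, not the actual WanTransformer3DModel code.

```python
import torch
import torch.nn as nn

dim, num_blocks = 1536, 30  # 1.3B-scale settings (see the table below)

# One MLP (a SiLU activation feeding a Linear layer) is shared by all blocks:
# it maps the time embedding to six modulation parameters per channel.
shared_time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim))

# Each transformer block learns only its own distinct bias on top of that.
block_biases = nn.ParameterList(
    [nn.Parameter(torch.zeros(1, 6 * dim)) for _ in range(num_blocks)]
)

t_emb = torch.randn(2, dim)        # time embeddings for a batch of 2
shared = shared_time_mlp(t_emb)    # computed once, reused by every block
for bias in block_biases:
    # a common convention: shift/scale/gate for attention and for the FFN
    shift_attn, scale_attn, gate_attn, shift_ffn, scale_ffn, gate_ffn = (
        shared + bias
    ).chunk(6, dim=-1)
    # ...each block then modulates its sublayers with its own six tensors...
```

This design keeps the modulation parameter count almost independent of depth while still letting every block specialize through its own bias, in line with the performance-per-parameter gain reported above.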
+ + +| Model | Dimension | Input Dimension | Output Dimension | Feedforward Dimension | Frequency Dimension | Number of Heads | Number of Layers | +|--------|-----------|-----------------|------------------|-----------------------|---------------------|-----------------|------------------| +| 1.3B | 1536 | 16 | 16 | 8960 | 256 | 12 | 30 | +| 14B | 5120 | 16 | 16 | 13824 | 256 | 40 | 40 | + + + +##### Data + +We curated and deduplicated a candidate dataset comprising a vast amount of image and video data. During the data curation process, we designed a four-step data cleaning process focused on fundamental dimensions, visual quality, and motion quality. Through this robust data processing pipeline, we can easily obtain high-quality, diverse, and large-scale training sets of images and videos. + +![figure1](assets/data_for_diff_stage.jpg "figure1") + + +##### Comparisons to SOTA +We compared **Wan2.1** with leading open-source and closed-source models to evaluate its performance. Using our carefully designed set of 1,035 internal prompts, we tested across 14 major dimensions and 26 sub-dimensions. We then computed the total score as a weighted sum of the per-dimension scores, with weights derived from human preferences in the matching process. The detailed results are shown in the table below. These results demonstrate our model's superior performance compared to both open-source and closed-source models. + +![figure1](assets/vben_vs_sota.png "figure1") + + +## Citation +If you find our work helpful, please cite us. + +``` +@article{wan2.1, + title = {Wan: Open and Advanced Large-Scale Video Generative Models}, + author = {Wan Team}, + journal = {}, + year = {2025} +} +``` + +## License Agreement +The models in this repository are licensed under the Apache 2.0 License. We claim no rights over the content you generate, granting you the freedom to use it while ensuring that your usage complies with the provisions of this license. You are fully accountable for your use of the models, which must not involve sharing any content that violates applicable laws, causes harm to individuals or groups, disseminates personal information intended for harm, spreads misinformation, or targets vulnerable populations. For a complete list of restrictions and details regarding your rights, please refer to the full text of the [license](LICENSE.txt). + + +## Acknowledgements + +We would like to thank the contributors to the [SD3](https://huggingface.co/stabilityai/stable-diffusion-3-medium), [Qwen](https://huggingface.co/Qwen), [umt5-xxl](https://huggingface.co/google/umt5-xxl), [diffusers](https://github.com/huggingface/diffusers) and [HuggingFace](https://huggingface.co) repositories, for their open research. + + + +## Contact Us +If you would like to leave a message for our research or product teams, feel free to join our [Discord](https://discord.gg/p5XbdQV7) or [WeChat groups](https://gw.alicdn.com/imgextra/i2/O1CN01tqjWFi1ByuyehkTSB_!!6000000000015-0-tps-611-1279.jpg)!
\ No newline at end of file diff --git a/assets/comp_effic.png b/assets/comp_effic.png new file mode 100644 index 0000000000000000000000000000000000000000..741f12abd4bc11efd6177e7c59765d87eaf7e395 --- /dev/null +++ b/assets/comp_effic.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0e225caffb4b31295ad150f95ee852e4c3dde4a00ac8f79a2ff500f2ce26b8d +size 1793594 diff --git a/assets/data_for_diff_stage.jpg b/assets/data_for_diff_stage.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7ba97f116a3e3304d9960069344019787181368 --- /dev/null +++ b/assets/data_for_diff_stage.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59aec08409f2d46b0e640e4e120dc7cca52c08c3de56d026602dbcff1ebf241a +size 528268 diff --git a/assets/i2v_res.png b/assets/i2v_res.png new file mode 100644 index 0000000000000000000000000000000000000000..98470f121ae318c11d25fd3728cd5c93e0c6993d --- /dev/null +++ b/assets/i2v_res.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6823b3206d8d0cb18d3b5b949dec1217f1178109ba11f14e977b67e1f7b8a248 +size 891681 diff --git a/assets/logo.png b/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..14cea40a9ec4c2aa8de3b46806b25d766980d909 --- /dev/null +++ b/assets/logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96cddc0f667293436d0b9f92a299b6346b65b231d38ee49719a33d46c91fe1e3 +size 56322 diff --git a/assets/t2v_res.jpg b/assets/t2v_res.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7549a1f66d7aa8fb90b6e6181188efc1be0edc28 --- /dev/null +++ b/assets/t2v_res.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91db579092446be2a834bc67721a8e4346936f38c4edb912f459ca3e10f8f439 +size 301030 diff --git a/assets/vben_1.3b_vs_sota.png b/assets/vben_1.3b_vs_sota.png new file mode 100644 index 0000000000000000000000000000000000000000..2d14bab7f9fb058a71ce70a78b25afb8d482079f --- /dev/null +++ b/assets/vben_1.3b_vs_sota.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7705db79f2e1428ec7a1e6fff8c4fbde062fb95bb233516ddbd04b20007c845 +size 515765 diff --git a/assets/vben_vs_sota.png b/assets/vben_vs_sota.png new file mode 100644 index 0000000000000000000000000000000000000000..cded47bc519dc2aeae2f370228209e8c9e74bc0b --- /dev/null +++ b/assets/vben_vs_sota.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a0e86ca85046d2675f97984b88b6e74df07bba8a62a31ab8a1aef50d4eda44e +size 1552119 diff --git a/assets/video_dit_arch.jpg b/assets/video_dit_arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97d9c19d286b432c33d644d5b00061c2e2a3545a --- /dev/null +++ b/assets/video_dit_arch.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:195dceec6570289d8b01cc51d2e28a7786216f19de55b23978a52610d1646a66 +size 643369 diff --git a/assets/video_vae_res.jpg b/assets/video_vae_res.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91ca92abf061f569b335f3b8ca63e796ce2f6103 --- /dev/null +++ b/assets/video_vae_res.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8f9e7f7353848056a615c8ef35ab86ec22976bb46cb27405008b4089701945c +size 212586 diff --git a/examples/i2v_input.JPG b/examples/i2v_input.JPG new file mode 100644 index 0000000000000000000000000000000000000000..8c7fabd943752179587eb717362db32ce1eb4800 --- /dev/null +++ b/examples/i2v_input.JPG @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:077e3d965090c9028c69c00931675f42e1acc815c6eb450ab291b3b72d211a8e +size 250628 diff --git a/image_encoder/config.json b/image_encoder/config.json new file mode 100644 index 0000000000000000000000000000000000000000..3be9aa15022880cec03c998d71d0b48ffeff18e2 --- /dev/null +++ b/image_encoder/config.json @@ -0,0 +1,23 @@ +{ + "_name_or_path": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", + "architectures": [ + "CLIPVisionModelWithProjection" + ], + "attention_dropout": 0.0, + "dropout": 0.0, + "hidden_act": "gelu", + "hidden_size": 1280, + "image_size": 224, + "initializer_factor": 1.0, + "initializer_range": 0.02, + "intermediate_size": 5120, + "layer_norm_eps": 1e-05, + "model_type": "clip_vision_model", + "num_attention_heads": 16, + "num_channels": 3, + "num_hidden_layers": 32, + "patch_size": 14, + "projection_dim": 1024, + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0" +} diff --git a/image_encoder/model.safetensors b/image_encoder/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d546049b3265c1438f03821e610dbb4380402b2f --- /dev/null +++ b/image_encoder/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5c0bbf4f43f319a80335387d371742333807589c5bb4130a1a2a5d4abf6bf72 +size 1264217760 diff --git a/image_processor/preprocessor_config.json b/image_processor/preprocessor_config.json new file mode 100644 index 0000000000000000000000000000000000000000..ae07b4c27c8e8d50505ee232210ef38d1e1cb2a2 --- /dev/null +++ b/image_processor/preprocessor_config.json @@ -0,0 +1,28 @@ +{ + "crop_size": { + "height": 224, + "width": 224 + }, + "do_center_crop": false, + "do_convert_rgb": true, + "do_normalize": true, + "do_rescale": true, + "do_resize": true, + "image_mean": [ + 0.48145466, + 0.4578275, + 0.40821073 + ], + "image_processor_type": "CLIPImageProcessor", + "image_std": [ + 0.26862954, + 0.26130258, + 0.27577711 + ], + "resample": 3, + "rescale_factor": 0.00392156862745098, + "size": { + "height": 224, + "width": 224 + } +} diff --git a/model_index.json b/model_index.json new file mode 100644 index 0000000000000000000000000000000000000000..b5e23b8a97ed52fa7fef8bdfa5bda52152f74936 --- /dev/null +++ b/model_index.json @@ -0,0 +1,32 @@ +{ + "_class_name": "WanImageToVideoPipeline", + "_diffusers_version": "0.33.0.dev0", + "image_encoder": [ + "transformers", + "CLIPVisionModelWithProjection" + ], + "image_processor": [ + "transformers", + "CLIPImageProcessor" + ], + "scheduler": [ + "diffusers", + "FlowMatchEulerDiscreteScheduler" + ], + "text_encoder": [ + "transformers", + "UMT5EncoderModel" + ], + "tokenizer": [ + "transformers", + "T5TokenizerFast" + ], + "transformer": [ + "diffusers", + "WanTransformer3DModel" + ], + "vae": [ + "diffusers", + "AutoencoderKLWan" + ] +} diff --git a/scheduler/scheduler_config.json b/scheduler/scheduler_config.json new file mode 100644 index 0000000000000000000000000000000000000000..68954ef5992ba98981b93175a67460852fefae00 --- /dev/null +++ b/scheduler/scheduler_config.json @@ -0,0 +1,28 @@ +{ + "_class_name": "UniPCMultistepScheduler", + "_diffusers_version": "0.33.0.dev0", + "beta_end": 0.02, + "beta_schedule": "linear", + "beta_start": 0.0001, + "disable_corrector": [], + "dynamic_thresholding_ratio": 0.995, + "final_sigmas_type": "zero", + "flow_shift": 5.0, + "lower_order_final": true, + "num_train_timesteps": 1000, + "predict_x0": true, + "prediction_type": "flow_prediction", + "rescale_betas_zero_snr": false, + 
"sample_max_value": 1.0, + "solver_order": 2, + "solver_p": null, + "solver_type": "bh2", + "steps_offset": 0, + "thresholding": false, + "timestep_spacing": "linspace", + "trained_betas": null, + "use_beta_sigmas": false, + "use_exponential_sigmas": false, + "use_flow_sigmas": true, + "use_karras_sigmas": false +} diff --git a/text_encoder/config.json b/text_encoder/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2fd01c57dddd8ae386d518c69c087c8ba8c73804 --- /dev/null +++ b/text_encoder/config.json @@ -0,0 +1,34 @@ +{ + "_name_or_path": "google/umt5-xxl", + "architectures": [ + "UMT5EncoderModel" + ], + "classifier_dropout": 0.0, + "d_ff": 10240, + "d_kv": 64, + "d_model": 4096, + "decoder_start_token_id": 0, + "dense_act_fn": "gelu_new", + "dropout_rate": 0.1, + "eos_token_id": 1, + "feed_forward_proj": "gated-gelu", + "initializer_factor": 1.0, + "is_encoder_decoder": true, + "is_gated_act": true, + "layer_norm_epsilon": 1e-06, + "model_type": "umt5", + "num_decoder_layers": 24, + "num_heads": 64, + "num_layers": 24, + "output_past": true, + "pad_token_id": 0, + "relative_attention_max_distance": 128, + "relative_attention_num_buckets": 32, + "scalable_attention": true, + "tie_word_embeddings": false, + "tokenizer_class": "T5Tokenizer", + "torch_dtype": "float32", + "transformers_version": "4.48.0.dev0", + "use_cache": true, + "vocab_size": 256384 +} diff --git a/text_encoder/model-00001-of-00005.safetensors b/text_encoder/model-00001-of-00005.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..383f168e1318f0f8d6777b1ba5e88546c1273c46 --- /dev/null +++ b/text_encoder/model-00001-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99 +size 4972389712 diff --git a/text_encoder/model-00002-of-00005.safetensors b/text_encoder/model-00002-of-00005.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..05f1cf9934e3ec7e228b51a93b56875cee468b86 --- /dev/null +++ b/text_encoder/model-00002-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:481c7b2b39771c44df6dd8d13ee12ed072d731b4a650bd092885d4d52db229ad +size 4899225672 diff --git a/text_encoder/model-00003-of-00005.safetensors b/text_encoder/model-00003-of-00005.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3ea8c93410053a1d42cde624bdf8faf011d329e3 --- /dev/null +++ b/text_encoder/model-00003-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f93148bcc04052a169e1e49bfcf6125df6cf9bf243cb9c627da75266cf8e35c3 +size 4966309504 diff --git a/text_encoder/model-00004-of-00005.safetensors b/text_encoder/model-00004-of-00005.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9c4d6361dc5ed76d8c129e9c7ad1c64ec7045637 --- /dev/null +++ b/text_encoder/model-00004-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a451792c739c05bca4606190cc2dd16731411bac03b4cf6aacc5767321f857c9 +size 4999880704 diff --git a/text_encoder/model-00005-of-00005.safetensors b/text_encoder/model-00005-of-00005.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..75aa52c300dafa768721d112ac5a7f11512156ff --- /dev/null +++ b/text_encoder/model-00005-of-00005.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7e76e18d224531b8197a46231cb53daf7f2f6ca707130252becf933026ac4eea +size 2885866152 diff --git a/text_encoder/model.safetensors.index.json b/text_encoder/model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..60ece61b46ecb3e6a5b705ea304bc97535317c2a --- /dev/null +++ b/text_encoder/model.safetensors.index.json @@ -0,0 +1,249 @@ +{ + "metadata": { + "total_size": 22723641344 + }, + "weight_map": { + "encoder.block.0.layer.0.SelfAttention.k.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.0.SelfAttention.o.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.0.SelfAttention.q.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.0.SelfAttention.v.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.0.layer_norm.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.1.DenseReluDense.wi_0.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.1.DenseReluDense.wi_1.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.1.DenseReluDense.wo.weight": "model-00001-of-00005.safetensors", + "encoder.block.0.layer.1.layer_norm.weight": "model-00001-of-00005.safetensors", + "encoder.block.1.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.1.layer.1.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.10.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.1.DenseReluDense.wo.weight": "model-00003-of-00005.safetensors", + "encoder.block.10.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + 
"encoder.block.11.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.1.DenseReluDense.wo.weight": "model-00003-of-00005.safetensors", + "encoder.block.11.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.1.DenseReluDense.wo.weight": "model-00003-of-00005.safetensors", + "encoder.block.12.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.13.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.13.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.SelfAttention.o.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.14.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.0.SelfAttention.o.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + 
"encoder.block.15.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.15.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.SelfAttention.o.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.16.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.SelfAttention.o.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.17.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.SelfAttention.o.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.18.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.SelfAttention.o.weight": 
"model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.SelfAttention.relative_attention_bias.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.0.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.1.DenseReluDense.wi_0.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.1.DenseReluDense.wi_1.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.1.DenseReluDense.wo.weight": "model-00004-of-00005.safetensors", + "encoder.block.19.layer.1.layer_norm.weight": "model-00004-of-00005.safetensors", + "encoder.block.2.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.2.layer.1.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.20.layer.0.SelfAttention.k.weight": "model-00004-of-00005.safetensors", + "encoder.block.20.layer.0.SelfAttention.o.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.0.SelfAttention.q.weight": "model-00004-of-00005.safetensors", + "encoder.block.20.layer.0.SelfAttention.relative_attention_bias.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.0.SelfAttention.v.weight": "model-00004-of-00005.safetensors", + "encoder.block.20.layer.0.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.1.DenseReluDense.wi_0.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.1.DenseReluDense.wi_1.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.1.DenseReluDense.wo.weight": "model-00005-of-00005.safetensors", + "encoder.block.20.layer.1.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.SelfAttention.k.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.SelfAttention.o.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.SelfAttention.q.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.SelfAttention.relative_attention_bias.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.SelfAttention.v.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.0.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.1.DenseReluDense.wi_0.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.1.DenseReluDense.wi_1.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.1.DenseReluDense.wo.weight": "model-00005-of-00005.safetensors", + "encoder.block.21.layer.1.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.SelfAttention.k.weight": 
"model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.SelfAttention.o.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.SelfAttention.q.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.SelfAttention.relative_attention_bias.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.SelfAttention.v.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.0.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.1.DenseReluDense.wi_0.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.1.DenseReluDense.wi_1.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.1.DenseReluDense.wo.weight": "model-00005-of-00005.safetensors", + "encoder.block.22.layer.1.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.SelfAttention.k.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.SelfAttention.o.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.SelfAttention.q.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.SelfAttention.relative_attention_bias.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.SelfAttention.v.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.0.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.1.DenseReluDense.wi_0.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.1.DenseReluDense.wi_1.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.1.DenseReluDense.wo.weight": "model-00005-of-00005.safetensors", + "encoder.block.23.layer.1.layer_norm.weight": "model-00005-of-00005.safetensors", + "encoder.block.3.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.3.layer.1.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.4.layer.1.layer_norm.weight": 
"model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.5.layer.1.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.1.DenseReluDense.wi_0.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.1.DenseReluDense.wi_1.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.1.DenseReluDense.wo.weight": "model-00002-of-00005.safetensors", + "encoder.block.6.layer.1.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.SelfAttention.k.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.SelfAttention.o.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.SelfAttention.q.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.SelfAttention.relative_attention_bias.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.SelfAttention.v.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.0.layer_norm.weight": "model-00002-of-00005.safetensors", + "encoder.block.7.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.7.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.7.layer.1.DenseReluDense.wo.weight": "model-00003-of-00005.safetensors", + "encoder.block.7.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.8.layer.1.DenseReluDense.wo.weight": 
"model-00003-of-00005.safetensors", + "encoder.block.8.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.SelfAttention.k.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.SelfAttention.o.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.SelfAttention.q.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.SelfAttention.relative_attention_bias.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.SelfAttention.v.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.0.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.1.DenseReluDense.wi_0.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.1.DenseReluDense.wi_1.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.1.DenseReluDense.wo.weight": "model-00003-of-00005.safetensors", + "encoder.block.9.layer.1.layer_norm.weight": "model-00003-of-00005.safetensors", + "encoder.final_layer_norm.weight": "model-00005-of-00005.safetensors", + "shared.weight": "model-00001-of-00005.safetensors" + } +} diff --git a/tokenizer/special_tokens_map.json b/tokenizer/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..2ed25bf989a28d20b5d4b5822fbc24666d12a6f7 --- /dev/null +++ b/tokenizer/special_tokens_map.json @@ -0,0 +1,332 @@ +{ + "additional_special_tokens": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "bos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": 
false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +}
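Every content field in the token maps above and below is empty in this rendering; the angle-bracketed token strings were evidently dropped as markup. Given tokenizer_class T5Tokenizer and extra_ids: 300 in the tokenizer_config.json that follows, the missing strings are presumably the usual <pad>, </s>, and <unk> plus the 300 sentinel tokens <extra_id_0> through <extra_id_299>, though that reading is an inference rather than something this dump preserves. A short sketch of checking what the shipped files actually define, assuming a local checkout of the repository root:

from transformers import AutoTokenizer

# Load the tokenizer/ subfolder shipped in this repository.
tok = AutoTokenizer.from_pretrained(".", subfolder="tokenizer")
print(tok.pad_token, tok.eos_token, tok.unk_token)
print(len(tok.additional_special_tokens))  # the sentinel tokens declared in the maps

diff --git a/tokenizer/spiece.model b/tokenizer/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..2fe5f347e9f9367585589ae89e997dfbd5cf802c --- /dev/null +++ b/tokenizer/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3909a67b780650b35cf529ac782ad2b6b26e6d1f849d3fbb6a872905f452458 +size 4548313
diff --git a/tokenizer/tokenizer.json b/tokenizer/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..9c6427ab2f801c942da4ee9740f1a750d3978a3b --- /dev/null +++ b/tokenizer/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20a46ac256746594ed7e1e3ef733b83fbc5a6f0922aa7480eda961743de080ef +size 16837459
diff --git a/tokenizer/tokenizer_config.json b/tokenizer/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..09d434f9457238f697f4c208aab47f58caa15bfe --- /dev/null +++ b/tokenizer/tokenizer_config.json @@ -0,0 +1,2749 @@ +{ + "added_tokens_decoder": {
+ "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "3": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256000": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256001": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256002": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256003": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256004": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256005": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256006": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256007": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256008": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256009": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256010": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256011": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + },
+ "256012": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, +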
"special": true + }, + "256013": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256014": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256015": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256016": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256017": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256018": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256019": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256020": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256021": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256022": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256023": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256024": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256025": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256026": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256027": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256028": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256029": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256030": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256031": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256032": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256033": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256034": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256035": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256036": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256037": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256038": { + "content": "", + "lstrip": false, + 
"normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256039": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256040": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256041": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256042": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256043": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256044": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256045": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256046": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256047": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256048": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256049": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256050": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256051": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256052": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256053": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256054": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256055": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256056": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256057": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256058": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256059": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256060": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256061": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256062": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256063": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": 
true + }, + "256064": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256065": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256066": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256067": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256068": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256069": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256070": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256071": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256072": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256073": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256074": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256075": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256076": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256077": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256078": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256079": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256080": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256081": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256082": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256083": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256084": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256085": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256086": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256087": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256088": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256089": { + "content": "", + "lstrip": false, + "normalized": 
false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256090": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256091": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256092": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256093": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256094": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256095": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256096": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256097": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256098": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256099": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256100": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256101": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256102": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256103": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256104": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256105": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256106": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256107": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256108": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256109": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256110": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256111": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256112": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256113": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256114": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + 
"256115": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256116": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256117": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256118": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256119": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256120": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256121": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256122": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256123": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256124": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256125": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256126": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256127": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256128": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256129": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256130": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256131": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256132": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256133": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256134": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256135": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256136": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256137": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256138": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256139": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256140": { + "content": "", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": true + }, + "256141": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256142": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256143": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256144": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256145": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256146": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256147": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256148": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256149": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256150": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256151": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256152": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256153": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256154": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256155": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256156": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256157": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256158": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256159": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256160": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256161": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256162": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256163": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256164": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256165": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256166": { + 
"content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256167": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256168": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256169": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256170": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256171": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256172": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256173": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256174": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256175": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256176": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256177": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256178": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256179": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256180": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256181": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256182": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256183": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256184": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256185": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256186": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256187": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256188": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256189": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256190": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256191": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + 
"single_word": false, + "special": true + }, + "256192": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256193": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256194": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256195": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256196": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256197": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256198": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256199": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256200": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256201": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256202": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256203": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256204": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256205": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256206": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256207": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256208": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256209": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256210": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256211": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256212": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256213": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256214": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256215": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256216": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256217": { + "content": "", + 
"lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256218": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256219": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256220": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256221": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256222": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256223": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256224": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256225": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256226": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256227": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256228": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256229": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256230": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256231": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256232": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256233": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256234": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256235": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256236": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256237": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256238": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256239": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256240": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256241": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256242": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
false, + "special": true + }, + "256243": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256244": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256245": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256246": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256247": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256248": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256249": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256250": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256251": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256252": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256253": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256254": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256255": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256256": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256257": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256258": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256259": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256260": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256261": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256262": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256263": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256264": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256265": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256266": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256267": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256268": { + "content": "", + "lstrip": false, 
+ "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256269": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256270": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256271": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256272": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256273": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256274": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256275": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256276": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256277": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256278": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256279": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256280": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256281": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256282": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256283": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256284": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256285": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256286": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256287": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256288": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256289": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256290": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256291": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256292": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256293": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": 
true + }, + "256294": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256295": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256296": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256297": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256298": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "256299": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "bos_token": "", + "clean_up_tokenization_spaces": true, + "eos_token": "", + "extra_ids": 300, + "extra_special_tokens": {}, + "model_max_length": 1000000000000000019884624838656, + "pad_token": "", + "sp_model_kwargs": {}, + "spaces_between_special_tokens": false, + "tokenizer_class": "T5Tokenizer", + "unk_token": "" +} diff --git a/transformer/config.json b/transformer/config.json new file mode 100644 index 0000000000000000000000000000000000000000..998a7ae19a6151f25b78801bfde3ac1c2372d086 --- /dev/null +++ b/transformer/config.json @@ -0,0 +1,23 @@ +{ + "_class_name": "WanTransformer3DModel", + "_diffusers_version": "0.33.0.dev0", + "added_kv_proj_dim": 5120, + "attention_head_dim": 128, + "cross_attn_norm": true, + "eps": 1e-06, + "ffn_dim": 13824, + "freq_dim": 256, + "image_dim": 1280, + "in_channels": 36, + "num_attention_heads": 40, + 
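The tokenizer above is a stock T5Tokenizer with 300 sentinel tokens, so the easiest sanity check is to load it and resolve a few of the special tokens. A minimal sketch, assuming a local checkout of this repo (the "." path and the tokenizer/ subfolder follow the standard Diffusers layout and are assumptions; the slow T5Tokenizer also needs the sentencepiece package installed):

from transformers import AutoTokenizer

# Load the tokenizer shipped in this repo's tokenizer/ subfolder (assumed path).
tok = AutoTokenizer.from_pretrained(".", subfolder="tokenizer")

# T5 places <extra_id_0> at the top of the vocabulary, so with extra_ids=300
# it should resolve to the highest added-token id (256299 per the config above).
print(tok.convert_tokens_to_ids("<extra_id_0>"))
print(tok.eos_token, tok.pad_token, tok.unk_token)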
"num_layers": 40, + "out_channels": 16, + "patch_size": [ + 1, + 2, + 2 + ], + "qk_norm": "rms_norm_across_heads", + "rope_max_seq_len": 1024, + "text_dim": 4096 +} diff --git a/transformer/diffusion_pytorch_model-00001-of-00014.safetensors b/transformer/diffusion_pytorch_model-00001-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6c0e7a65507435c7d94eac46a9ca94505998a899 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00001-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:578fb4865a81befa52e7cc6bbf4e38bf889b060da48469d6afc33293cd320aee +size 4929161040 diff --git a/transformer/diffusion_pytorch_model-00002-of-00014.safetensors b/transformer/diffusion_pytorch_model-00002-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fa668c9a7717e9f095761408ed721ef136281d18 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00002-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6de6b7274a0a5df23781946380090186b118db8ebc2b399fb3b0c184e34ec51 +size 4951011392 diff --git a/transformer/diffusion_pytorch_model-00003-of-00014.safetensors b/transformer/diffusion_pytorch_model-00003-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d5aefb58c55dda4342d9565b6304f5eab713bc5c --- /dev/null +++ b/transformer/diffusion_pytorch_model-00003-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40cd68d9cbcedeeecc8c0d7097d8d3661abd94a561d845a83467f51dc2c5d510 +size 4951011392 diff --git a/transformer/diffusion_pytorch_model-00004-of-00014.safetensors b/transformer/diffusion_pytorch_model-00004-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..19510bcb68d4e15a99a0b38ab821bcabfda7d514 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00004-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac9bda9384b13089db4c7e8aa0d6717f25cc8b359e7c22de972928affbde53f +size 4951093744 diff --git a/transformer/diffusion_pytorch_model-00005-of-00014.safetensors b/transformer/diffusion_pytorch_model-00005-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ef5f954625670acd56104a3ab0e7802d8bd7afe3 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00005-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c496abbeca6af95c75b081f1b3c79c89c955377272ce9301c57df202a56682 +size 4846133208 diff --git a/transformer/diffusion_pytorch_model-00006-of-00014.safetensors b/transformer/diffusion_pytorch_model-00006-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..49758d8754ae9f0e4cfc8ca22755cb31f9c181ca --- /dev/null +++ b/transformer/diffusion_pytorch_model-00006-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7183288b1dfd63cc5b3df83dc7143baa3d7d7e2af28dd9483bfb11302e3485b6 +size 4846133208 diff --git a/transformer/diffusion_pytorch_model-00007-of-00014.safetensors b/transformer/diffusion_pytorch_model-00007-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a5dc4f70ecab22ca20046d347251830652df9922 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00007-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
diff --git a/transformer/diffusion_pytorch_model-00001-of-00014.safetensors b/transformer/diffusion_pytorch_model-00001-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6c0e7a65507435c7d94eac46a9ca94505998a899 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00001-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:578fb4865a81befa52e7cc6bbf4e38bf889b060da48469d6afc33293cd320aee +size 4929161040
diff --git a/transformer/diffusion_pytorch_model-00002-of-00014.safetensors b/transformer/diffusion_pytorch_model-00002-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..fa668c9a7717e9f095761408ed721ef136281d18 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00002-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6de6b7274a0a5df23781946380090186b118db8ebc2b399fb3b0c184e34ec51 +size 4951011392
diff --git a/transformer/diffusion_pytorch_model-00003-of-00014.safetensors b/transformer/diffusion_pytorch_model-00003-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..d5aefb58c55dda4342d9565b6304f5eab713bc5c --- /dev/null +++ b/transformer/diffusion_pytorch_model-00003-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40cd68d9cbcedeeecc8c0d7097d8d3661abd94a561d845a83467f51dc2c5d510 +size 4951011392
diff --git a/transformer/diffusion_pytorch_model-00004-of-00014.safetensors b/transformer/diffusion_pytorch_model-00004-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..19510bcb68d4e15a99a0b38ab821bcabfda7d514 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00004-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ac9bda9384b13089db4c7e8aa0d6717f25cc8b359e7c22de972928affbde53f +size 4951093744
diff --git a/transformer/diffusion_pytorch_model-00005-of-00014.safetensors b/transformer/diffusion_pytorch_model-00005-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ef5f954625670acd56104a3ab0e7802d8bd7afe3 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00005-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c496abbeca6af95c75b081f1b3c79c89c955377272ce9301c57df202a56682 +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00006-of-00014.safetensors b/transformer/diffusion_pytorch_model-00006-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..49758d8754ae9f0e4cfc8ca22755cb31f9c181ca --- /dev/null +++ b/transformer/diffusion_pytorch_model-00006-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7183288b1dfd63cc5b3df83dc7143baa3d7d7e2af28dd9483bfb11302e3485b6 +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00007-of-00014.safetensors b/transformer/diffusion_pytorch_model-00007-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a5dc4f70ecab22ca20046d347251830652df9922 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00007-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f29d03e9c754a1fb3cd55b36e90eec4e766ae8bf6f12914590cf86730cf9cd4 +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00008-of-00014.safetensors b/transformer/diffusion_pytorch_model-00008-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..f96228ea19a50e8aba0fa38f8e07cbc692b72465 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00008-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8664bbf773bc62d6b1e91fbba7e65a67b46049b78e6cee3e1d88ed119ede144a +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00009-of-00014.safetensors b/transformer/diffusion_pytorch_model-00009-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3539d876db49b8dd2a99548aff88c296165eb138 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00009-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af18a5aff3479457318060670067874482fa048e7a644e00b37262bde2c0085a +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00010-of-00014.safetensors b/transformer/diffusion_pytorch_model-00010-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..bb515d4d542ffbb0614020be9a862f671da5961f --- /dev/null +++ b/transformer/diffusion_pytorch_model-00010-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c0bff9b932684a39823b1fc3d3d83a2fe7b33ff679ee0a7b98c60e102b6fa45 +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00011-of-00014.safetensors b/transformer/diffusion_pytorch_model-00011-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5bba158deaffd7bf9c71316c8cdddb157999ec2b --- /dev/null +++ b/transformer/diffusion_pytorch_model-00011-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ae788cff8de05f565042ccec34550e6f4e2629435718aac5eb993603effa37e +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00012-of-00014.safetensors b/transformer/diffusion_pytorch_model-00012-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ad5c1c29503bc7fcaf326b856c1e0ad39f201360 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00012-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a55a84afcc67fe716ac1e302eb690e28b0b96896cbee2c7533c6fc5add63efeb +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00013-of-00014.safetensors b/transformer/diffusion_pytorch_model-00013-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..c21816f566e380b0c5b15c94fb74947e04b6f75d --- /dev/null +++ b/transformer/diffusion_pytorch_model-00013-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eaea61781808c98dac0b9b98f7e8e5ee8d1d84180b206961594760b910c4c43 +size 4846133208
diff --git a/transformer/diffusion_pytorch_model-00014-of-00014.safetensors b/transformer/diffusion_pytorch_model-00014-of-00014.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6343ff768725c0b3c32c8f64f697bdeaaa12c857 --- /dev/null +++ b/transformer/diffusion_pytorch_model-00014-of-00014.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76c360055f102fe88c0132ef325e4dbb193955a7cfbc92a15a2c0c337ef5d85c +size 2182996112
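Each shard appears in the diff only as a Git LFS pointer stub: a version line, a sha256 oid, and the payload size in bytes; the multi-gigabyte weights themselves are fetched by git-lfs. That makes a cheap post-download integrity check possible by hashing the local file against the pointer's oid. A minimal sketch in Python using only the standard library (the local path is an assumption; the digest is copied from the first shard's pointer above):

import hashlib

# Expected digest from the LFS pointer for shard 00001.
EXPECTED = "578fb4865a81befa52e7cc6bbf4e38bf889b060da48469d6afc33293cd320aee"

h = hashlib.sha256()
with open("transformer/diffusion_pytorch_model-00001-of-00014.safetensors", "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the ~4.9 GB shard into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# A mismatch usually means the file is still a ~130-byte pointer stub
# rather than the real payload, i.e. "git lfs pull" has not been run.
assert h.hexdigest() == EXPECTED, "shard corrupt or not pulled via git lfs"

The index.json that follows ties the shards together: it records the total checkpoint size and maps every parameter name to the shard that contains it, so loaders only open the shards they actually need.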
diff --git a/transformer/diffusion_pytorch_model.safetensors.index.json b/transformer/diffusion_pytorch_model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..8c634524e998b6be92d5ce1983030bd37e87718d --- /dev/null +++ b/transformer/diffusion_pytorch_model.safetensors.index.json @@ -0,0 +1,1350 @@
+{ + "metadata": { + "total_size": 65580334336 + }, + "weight_map": {
+ "blocks.0.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.add_k_proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.add_k_proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn2.add_v_proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.add_v_proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.norm_added_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.norm_added_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn2.norm_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn2.to_out.0.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_out.0.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.attn2.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.attn2.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.ffn.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.ffn.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.ffn.net.2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.ffn.net.2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.norm2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.0.norm2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors",
+ "blocks.0.scale_shift_table": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.norm_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", +
"blocks.1.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.add_k_proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.add_k_proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.add_v_proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.add_v_proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.norm_added_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.norm_added_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.norm_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_out.0.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_out.0.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.attn2.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.ffn.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.ffn.net.0.proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.ffn.net.2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.ffn.net.2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.norm2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.norm2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.1.scale_shift_table": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.10.attn1.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.norm_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn1.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.add_k_proj.bias": 
"diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.add_k_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.add_v_proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.add_v_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.norm_added_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.norm_added_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.norm_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.attn2.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.ffn.net.0.proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.ffn.net.0.proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.ffn.net.2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.ffn.net.2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.norm2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.norm2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.10.scale_shift_table": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.norm_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn1.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.add_k_proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.add_k_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.add_v_proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.add_v_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.norm_added_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.norm_added_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.norm_q.weight": 
"diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.attn2.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.ffn.net.0.proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.11.ffn.net.0.proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.11.ffn.net.2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.11.ffn.net.2.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.11.norm2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.norm2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.11.scale_shift_table": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.12.attn1.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_v.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn1.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.add_k_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.add_k_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.add_v_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.add_v_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.norm_added_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.norm_added_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_v.bias": 
"diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.attn2.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.ffn.net.0.proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.ffn.net.0.proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.ffn.net.2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.ffn.net.2.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.norm2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.norm2.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.12.scale_shift_table": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_v.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn1.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.add_k_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.add_k_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.add_v_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.add_v_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.norm_added_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.norm_added_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_v.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.attn2.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.ffn.net.0.proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.ffn.net.0.proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.ffn.net.2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.ffn.net.2.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.norm2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.norm2.weight": 
"diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.13.scale_shift_table": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_v.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn1.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.add_k_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.add_k_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.add_v_proj.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.add_v_proj.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.norm_added_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.norm_added_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.norm_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.norm_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_k.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_k.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_out.0.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_out.0.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_q.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_q.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_v.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.attn2.to_v.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.ffn.net.0.proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.14.ffn.net.0.proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.14.ffn.net.2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.14.ffn.net.2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.14.norm2.bias": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.norm2.weight": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.14.scale_shift_table": "diffusion_pytorch_model-00005-of-00014.safetensors", + "blocks.15.attn1.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_out.0.weight": 
"diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn1.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.add_k_proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.add_k_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.add_v_proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.add_v_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.norm_added_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.norm_added_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_out.0.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.attn2.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.ffn.net.0.proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.ffn.net.0.proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.ffn.net.2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.ffn.net.2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.norm2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.norm2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.15.scale_shift_table": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_out.0.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn1.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.add_k_proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.add_k_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.add_v_proj.bias": 
"diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.add_v_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.norm_added_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.norm_added_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_out.0.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.attn2.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.ffn.net.0.proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.ffn.net.0.proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.ffn.net.2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.ffn.net.2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.norm2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.norm2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.16.scale_shift_table": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_out.0.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn1.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.add_k_proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.add_k_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.add_v_proj.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.add_v_proj.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.norm_added_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.norm_added_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.norm_k.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.norm_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_k.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_k.weight": 
"diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_out.0.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_out.0.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_q.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_q.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_v.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.attn2.to_v.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.ffn.net.0.proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.17.ffn.net.0.proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.17.ffn.net.2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.17.ffn.net.2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.17.norm2.bias": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.norm2.weight": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.17.scale_shift_table": "diffusion_pytorch_model-00006-of-00014.safetensors", + "blocks.18.attn1.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn1.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.add_k_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.add_k_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.add_v_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.add_v_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.norm_added_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.norm_added_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.attn2.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.ffn.net.0.proj.bias": 
"diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.ffn.net.0.proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.ffn.net.2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.ffn.net.2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.norm2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.norm2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.18.scale_shift_table": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn1.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.add_k_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.add_k_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.add_v_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.add_v_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.norm_added_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.norm_added_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.attn2.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.ffn.net.0.proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.ffn.net.0.proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.ffn.net.2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.ffn.net.2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.norm2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.norm2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.19.scale_shift_table": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.2.attn1.norm_k.weight": 
"diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_out.0.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_out.0.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn1.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.add_k_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.add_k_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.add_v_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.add_v_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.norm_added_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.norm_added_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.norm_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.norm_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_k.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_k.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_out.0.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.attn2.to_q.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_q.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_v.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.attn2.to_v.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.2.ffn.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.ffn.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.ffn.net.2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.ffn.net.2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.norm2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.norm2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.2.scale_shift_table": "diffusion_pytorch_model-00001-of-00014.safetensors", + "blocks.20.attn1.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + 
"blocks.20.attn1.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn1.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.add_k_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.add_k_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.add_v_proj.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.add_v_proj.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.norm_added_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.norm_added_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.norm_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.norm_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_k.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_k.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_out.0.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_out.0.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_q.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_q.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_v.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.attn2.to_v.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.ffn.net.0.proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.20.ffn.net.0.proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.20.ffn.net.2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.20.ffn.net.2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.20.norm2.bias": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.norm2.weight": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.20.scale_shift_table": "diffusion_pytorch_model-00007-of-00014.safetensors", + "blocks.21.attn1.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn1.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.add_k_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.add_k_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.add_v_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.add_v_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.norm_added_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + 
"blocks.21.attn2.norm_added_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.attn2.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.ffn.net.0.proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.ffn.net.0.proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.ffn.net.2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.ffn.net.2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.norm2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.norm2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.21.scale_shift_table": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn1.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.add_k_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.add_k_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.add_v_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.add_v_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.norm_added_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.norm_added_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + 
"blocks.22.attn2.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.attn2.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.ffn.net.0.proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.ffn.net.0.proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.ffn.net.2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.ffn.net.2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.norm2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.norm2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.22.scale_shift_table": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn1.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.add_k_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.add_k_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.add_v_proj.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.add_v_proj.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.norm_added_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.norm_added_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.norm_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.norm_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_k.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_k.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_out.0.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_out.0.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_q.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_q.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_v.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.attn2.to_v.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.ffn.net.0.proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.23.ffn.net.0.proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.23.ffn.net.2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.23.ffn.net.2.weight": 
"diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.23.norm2.bias": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.norm2.weight": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.23.scale_shift_table": "diffusion_pytorch_model-00008-of-00014.safetensors", + "blocks.24.attn1.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn1.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.add_k_proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.add_k_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.add_v_proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.add_v_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.norm_added_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.norm_added_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.attn2.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.ffn.net.0.proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.ffn.net.0.proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.ffn.net.2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.ffn.net.2.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.norm2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.norm2.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.24.scale_shift_table": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_k.weight": 
"diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn1.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.add_k_proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.add_k_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.add_v_proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.add_v_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.norm_added_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.norm_added_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.attn2.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.ffn.net.0.proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.ffn.net.0.proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.ffn.net.2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.ffn.net.2.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.norm2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.norm2.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.25.scale_shift_table": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn1.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.add_k_proj.bias": 
"diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.add_k_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.add_v_proj.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.add_v_proj.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.norm_added_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.norm_added_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.norm_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.norm_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_k.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_k.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_out.0.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_out.0.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_q.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_q.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_v.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.attn2.to_v.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.ffn.net.0.proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.26.ffn.net.0.proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.26.ffn.net.2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.26.ffn.net.2.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.26.norm2.bias": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.norm2.weight": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.26.scale_shift_table": "diffusion_pytorch_model-00009-of-00014.safetensors", + "blocks.27.attn1.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.norm_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_v.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn1.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.add_k_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.add_k_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.add_v_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.add_v_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.norm_added_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.norm_added_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.norm_q.weight": 
"diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_v.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.attn2.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.ffn.net.0.proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.ffn.net.0.proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.ffn.net.2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.ffn.net.2.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.norm2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.norm2.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.27.scale_shift_table": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.norm_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_v.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn1.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.add_k_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.add_k_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.add_v_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.add_v_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.norm_added_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.norm_added_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.norm_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_v.bias": 
"diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.attn2.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.ffn.net.0.proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.ffn.net.0.proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.ffn.net.2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.ffn.net.2.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.norm2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.norm2.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.28.scale_shift_table": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.norm_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_v.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn1.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.add_k_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.add_k_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.add_v_proj.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.add_v_proj.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.norm_added_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.norm_added_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.norm_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.norm_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_k.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_k.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_out.0.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_out.0.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_q.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_q.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_v.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.attn2.to_v.weight": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.ffn.net.0.proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.29.ffn.net.0.proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.29.ffn.net.2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.29.ffn.net.2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.29.norm2.bias": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.norm2.weight": 
"diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.29.scale_shift_table": "diffusion_pytorch_model-00010-of-00014.safetensors", + "blocks.3.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.add_k_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.add_k_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.add_v_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.add_v_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.norm_added_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.norm_added_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_out.0.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.attn2.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.ffn.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.ffn.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.ffn.net.2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.ffn.net.2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.norm2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.norm2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.3.scale_shift_table": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.30.attn1.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + 
"blocks.30.attn1.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn1.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.add_k_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.add_k_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.add_v_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.add_v_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.norm_added_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.norm_added_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.attn2.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.ffn.net.0.proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.ffn.net.0.proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.ffn.net.2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.ffn.net.2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.norm2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.norm2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.30.scale_shift_table": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn1.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.add_k_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.add_k_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.add_v_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + 
"blocks.31.attn2.add_v_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.norm_added_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.norm_added_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.attn2.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.ffn.net.0.proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.ffn.net.0.proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.ffn.net.2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.ffn.net.2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.norm2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.norm2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.31.scale_shift_table": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn1.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.add_k_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.add_k_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.add_v_proj.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.add_v_proj.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.norm_added_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.norm_added_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.norm_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.norm_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_k.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_k.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + 
"blocks.32.attn2.to_out.0.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_out.0.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_q.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_q.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_v.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.attn2.to_v.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.ffn.net.0.proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.32.ffn.net.0.proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.32.ffn.net.2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.32.ffn.net.2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.32.norm2.bias": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.norm2.weight": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.32.scale_shift_table": "diffusion_pytorch_model-00011-of-00014.safetensors", + "blocks.33.attn1.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.norm_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn1.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.add_k_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.add_k_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.add_v_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.add_v_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.norm_added_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.norm_added_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.norm_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.attn2.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.ffn.net.0.proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.ffn.net.0.proj.weight": 
"diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.ffn.net.2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.ffn.net.2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.norm2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.norm2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.33.scale_shift_table": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.norm_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn1.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.add_k_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.add_k_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.add_v_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.add_v_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.norm_added_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.norm_added_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.norm_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.attn2.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.ffn.net.0.proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.ffn.net.0.proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.ffn.net.2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.ffn.net.2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.norm2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.norm2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.34.scale_shift_table": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.norm_q.weight": 
"diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn1.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.add_k_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.add_k_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.add_v_proj.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.add_v_proj.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.norm_added_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.norm_added_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.norm_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.norm_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_k.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_k.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_out.0.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_out.0.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_q.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_q.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_v.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.attn2.to_v.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.ffn.net.0.proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.35.ffn.net.0.proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.35.ffn.net.2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.35.ffn.net.2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.35.norm2.bias": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.norm2.weight": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.35.scale_shift_table": "diffusion_pytorch_model-00012-of-00014.safetensors", + "blocks.36.attn1.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_q.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_v.bias": 
"diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn1.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.add_k_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.add_k_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.add_v_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.add_v_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.norm_added_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.norm_added_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_q.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_v.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.attn2.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.ffn.net.0.proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.ffn.net.0.proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.ffn.net.2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.ffn.net.2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.norm2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.norm2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.36.scale_shift_table": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_q.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_v.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn1.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.add_k_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.add_k_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.add_v_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.add_v_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.norm_added_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.norm_added_q.weight": 
"diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_q.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_v.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.attn2.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.ffn.net.0.proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.ffn.net.0.proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.ffn.net.2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.ffn.net.2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.norm2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.norm2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.37.scale_shift_table": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_q.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_v.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn1.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.add_k_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.add_k_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.add_v_proj.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.add_v_proj.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.norm_added_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.norm_added_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.norm_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.norm_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_k.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_k.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_out.0.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_out.0.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_q.bias": 
"diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_q.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_v.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.attn2.to_v.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.ffn.net.0.proj.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.38.ffn.net.0.proj.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.38.ffn.net.2.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.38.ffn.net.2.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.38.norm2.bias": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.norm2.weight": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.38.scale_shift_table": "diffusion_pytorch_model-00013-of-00014.safetensors", + "blocks.39.attn1.norm_k.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.norm_q.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_k.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_k.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_out.0.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_out.0.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_q.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_q.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_v.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn1.to_v.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.add_k_proj.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.add_k_proj.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.add_v_proj.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.add_v_proj.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.norm_added_k.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.norm_added_q.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.norm_k.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.norm_q.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_k.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_k.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_out.0.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_out.0.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_q.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_q.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_v.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.attn2.to_v.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.ffn.net.0.proj.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.ffn.net.0.proj.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.ffn.net.2.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.ffn.net.2.weight": 
"diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.norm2.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.norm2.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.39.scale_shift_table": "diffusion_pytorch_model-00014-of-00014.safetensors", + "blocks.4.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_out.0.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.add_k_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.add_k_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.add_v_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.add_v_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.norm_added_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.norm_added_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_out.0.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.attn2.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.ffn.net.0.proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.ffn.net.0.proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.ffn.net.2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.ffn.net.2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.norm2.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.norm2.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.4.scale_shift_table": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_out.0.bias": 
"diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_out.0.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn1.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.add_k_proj.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.add_k_proj.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.add_v_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.add_v_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.norm_added_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.norm_added_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.norm_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.norm_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_k.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_k.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.attn2.to_q.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_q.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_v.bias": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.attn2.to_v.weight": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.5.ffn.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.ffn.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.ffn.net.2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.ffn.net.2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.norm2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.norm2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.5.scale_shift_table": "diffusion_pytorch_model-00002-of-00014.safetensors", + "blocks.6.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.add_k_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.add_k_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + 
"blocks.6.attn2.add_v_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.add_v_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.norm_added_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.norm_added_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.attn2.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.ffn.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.ffn.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.ffn.net.2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.ffn.net.2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.norm2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.norm2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.6.scale_shift_table": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.add_k_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.add_k_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.add_v_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.add_v_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.norm_added_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.norm_added_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_k.weight": 
"diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.attn2.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.ffn.net.0.proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.ffn.net.0.proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.ffn.net.2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.ffn.net.2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.norm2.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.norm2.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.7.scale_shift_table": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_out.0.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_out.0.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn1.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.add_k_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.add_k_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.add_v_proj.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.add_v_proj.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.norm_added_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.attn2.norm_added_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.attn2.norm_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.norm_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_k.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_k.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.attn2.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.attn2.to_q.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_q.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_v.bias": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.attn2.to_v.weight": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.8.ffn.net.0.proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + 
"blocks.8.ffn.net.0.proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.ffn.net.2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.ffn.net.2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.norm2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.norm2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.8.scale_shift_table": "diffusion_pytorch_model-00003-of-00014.safetensors", + "blocks.9.attn1.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.norm_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn1.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.add_k_proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.add_k_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.add_v_proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.add_v_proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.norm_added_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.norm_added_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.norm_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.norm_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_k.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_k.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_out.0.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_out.0.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_q.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_q.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_v.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.attn2.to_v.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.ffn.net.0.proj.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.ffn.net.0.proj.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.ffn.net.2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.ffn.net.2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.norm2.bias": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.norm2.weight": "diffusion_pytorch_model-00004-of-00014.safetensors", + "blocks.9.scale_shift_table": "diffusion_pytorch_model-00004-of-00014.safetensors", + "condition_embedder.image_embedder.ff.net.0.proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.ff.net.0.proj.weight": 
"diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.ff.net.2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.ff.net.2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.norm1.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.norm1.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.norm2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.image_embedder.norm2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.text_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.text_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.text_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.text_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_proj.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "condition_embedder.time_proj.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "patch_embedding.bias": "diffusion_pytorch_model-00001-of-00014.safetensors", + "patch_embedding.weight": "diffusion_pytorch_model-00001-of-00014.safetensors", + "proj_out.bias": "diffusion_pytorch_model-00014-of-00014.safetensors", + "proj_out.weight": "diffusion_pytorch_model-00014-of-00014.safetensors", + "scale_shift_table": "diffusion_pytorch_model-00001-of-00014.safetensors" + } +} diff --git a/vae/config.json b/vae/config.json new file mode 100644 index 0000000000000000000000000000000000000000..fe988ee53511225fb2fd0a01004d6e19524df75f --- /dev/null +++ b/vae/config.json @@ -0,0 +1,56 @@ +{ + "_class_name": "AutoencoderKLWan", + "_diffusers_version": "0.33.0.dev0", + "attn_scales": [], + "base_dim": 96, + "dim_mult": [ + 1, + 2, + 4, + 4 + ], + "dropout": 0.0, + "latents_mean": [ + -0.7571, + -0.7089, + -0.9113, + 0.1075, + -0.1745, + 0.9653, + -0.1517, + 1.5508, + 0.4134, + -0.0715, + 0.5517, + -0.3632, + -0.1922, + -0.9497, + 0.2503, + -0.2921 + ], + "latents_std": [ + 2.8184, + 1.4541, + 2.3275, + 2.6558, + 1.2196, + 1.7708, + 2.6052, + 2.0743, + 3.2687, + 2.1526, + 2.8652, + 1.5579, + 1.6382, + 1.1253, + 2.8251, + 1.916 + ], + "num_res_blocks": 2, + "temperal_downsample": [ + false, + true, + true + ], + "z_dim": 16 +} diff --git a/vae/diffusion_pytorch_model.safetensors b/vae/diffusion_pytorch_model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..af13d1f7b1bc28a9f1e9cd85cac5da951aead1b3 --- /dev/null +++ b/vae/diffusion_pytorch_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793 +size 507591892