diff --git a/loss_params.pth b/loss_params.pth new file mode 100644 index 0000000000000000000000000000000000000000..9fc2cfc6dc7bb8d4e6eab51ba6ebd96c163dca11 --- /dev/null +++ b/loss_params.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e4c687fb455b7495e325d5f1761391d281323de6d2a493b153a3dac9536664e +size 3120 diff --git a/onnx/up_blocks.0/attentions.1.transformer_blocks.7.ff.net.2.bias b/onnx/up_blocks.0/attentions.1.transformer_blocks.7.ff.net.2.bias new file mode 100644 index 0000000000000000000000000000000000000000..6d738ee7ca4f9f44646976b7e74e9ae8b45316ca Binary files /dev/null and b/onnx/up_blocks.0/attentions.1.transformer_blocks.7.ff.net.2.bias differ diff --git a/onnx/up_blocks.0/attentions.1.transformer_blocks.8.norm3.weight b/onnx/up_blocks.0/attentions.1.transformer_blocks.8.norm3.weight new file mode 100644 index 0000000000000000000000000000000000000000..5ba90e0a72f26fc7f4a43a2d260019a1da581308 --- /dev/null +++ b/onnx/up_blocks.0/attentions.1.transformer_blocks.8.norm3.weight @@ -0,0 +1 @@ +l:::::m:::{:~::::z:::::{::|:d:::&::\:::H::?::{::::z:z:4::{::::::|:V::z::I:::m:j::=:n:m:e:n:j::z:X:::::i::*:t:u:::t:::0:{:::n:f:t::)9_:k::::P:::}:t:>::::::b::j:~:::::[::~:::::&:::z::n:::d:*:K::D:t:::b::m::F:::::|::L::::[:}:]:U:t:::::.:n:: 
::S::b::{:G::n::t:d:T:~:z:~:b:|:c:\:$:::f::4:m:::::h:s:{::f::m::+::::n:::$::z::::::L::::{:::::::\:Z:l::c:S::V::n:F:::;:d:v:V:~:::^:z::k:::::|::::n:c::<:Z::v:f:b:::::o:=::::{:=:::T::[:\::`:::j::n::u:::f:z:s:4::~::::::v::[9V:k:::f:n:~:l:::e:::::::^::\::::::{:d:|::\:R::\:::t:::x:D:::t:{:t::T::$:::T:4:::{::t:l::Z:::::::4::\:&::>:::S:|::Z:::::::j::::v:L:::t:v:L:t:::::::::n:::::\:::T:f:a::::n::_:E:[:\::Z::Z:::4:\::::v:::::::i:z:S:m:::::::T:d:Z:u::::F:}:{:::|::|:m:2::v:n:c:|:z:::S:::F:z:t::::|::::v::{::E:f::::T:m:t::|::::]::::T:::8:W:S:d:::::y::H::]:t:::L:::u:::::v::>::::::Z:R:::Y:L:t::::v:y::4::f:t:::::":::::::t::l:::u:i:|::V:v:s:Z::+:L:O:::::::f::::j::c:o:N::::::T:f:L:::::::U:n:|:{::b:Y:K:::-:Z:k:L:::f:f:|::::C:t::\:N::4:::::`:v:::::::f::u:x::]:::~:4:K::j:::::j:z::t::{::::T:l:::j:::~::|::t::{::::::n::::*::::L::m:z:::t:b:::j::n:|:\:::m:S::::Z::::~:U::N:I:n::\:u:t:l:]:n:*::::::::{:::o::::9p::d::m::::z:::F::|:::Z:U:N:::[:L::::v:v:~:\:m::d::::m::63::::::::::c:\:::{:::K::::::n:Z:z::1::::f:Z::V:::::::m::::*:f::v:::v:::::9::{:n:U:::::\::::v:::f::m::z::[::z::|:::b::|:E:::::}:::::n::m:l:j:W::::9:t:Y::;:|:Z:T:::::::::z:;:::::L:x:::N:+::::.::f:f::{:::m:::::::9b:}:m::u:z::x::Z:::::*:2:i::m::P:}::f:f::I:Z::E::|:&:b:}::i:t::d::::::::j:::|:::[:u::u:z::Z:::|:::::z:d:::z:|:t:Y:l:e::n:.:::i:::n:d:~::::::|:k:|:\:z::::S:=:::|::t:::::d:::t:|:l:::\::::l:`:::::::::z:\::f::::::I:::::|:{:&::::L::G:{:::b::b:U:::{:::f:n::::::Y:}:::4:::b:::4::::4:~:u::V:::::9:~:l:#::c:\::m::::~::=:{:::V:::m:n:L:d:::M:d::l::>::Y:::::::d:::::<:d:Y::::|::z:{:s::l:j::::N::m:[::::Y:{:::\:Z:i::v::T:::[:v:m:::::n:L:\:h:9n::::a::::f::H: \ No newline at end of file diff --git a/onnx/up_blocks.0/attentions.1.transformer_blocks.9.ff.net.2.bias b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.ff.net.2.bias new file mode 100644 index 0000000000000000000000000000000000000000..cfb0bf04c4ed3988d28320cbc11a0773695bc0d7 Binary files /dev/null and b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.ff.net.2.bias differ diff 
--git a/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm1.bias b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm1.bias new file mode 100644 index 0000000000000000000000000000000000000000..ed3eaf320f3043edffab9854f3e278ebc5e76aee Binary files /dev/null and b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm1.bias differ diff --git a/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm2.bias b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm2.bias new file mode 100644 index 0000000000000000000000000000000000000000..c091a7c2843feb9c7ffbc2bb0b71bc5f3929541f Binary files /dev/null and b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm2.bias differ diff --git a/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm3.weight b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm3.weight new file mode 100644 index 0000000000000000000000000000000000000000..2041980049b186fcff8c34cd57ce36f8bc2f3fe8 Binary files /dev/null and b/onnx/up_blocks.0/attentions.1.transformer_blocks.9.norm3.weight differ diff --git a/onnx/up_blocks.0/attentions.2.proj_in.bias b/onnx/up_blocks.0/attentions.2.proj_in.bias new file mode 100644 index 0000000000000000000000000000000000000000..5e9e66ad2a6488d4f0ad4482d0e7f5a7a92dc4ed Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.proj_in.bias differ diff --git a/onnx/up_blocks.0/attentions.2.proj_out.bias b/onnx/up_blocks.0/attentions.2.proj_out.bias new file mode 100644 index 0000000000000000000000000000000000000000..a7a458ad88df413cd17b1d95d5695d071ca8acfe Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.proj_out.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.0.ff.net.0.proj.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.0.ff.net.0.proj.bias new file mode 100644 index 0000000000000000000000000000000000000000..f2f7e3787c6ef2eaf248204ec9d90df16672f4ff Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.0.ff.net.0.proj.bias differ 
diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.0.norm1.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.0.norm1.weight new file mode 100644 index 0000000000000000000000000000000000000000..98fc61ee0e9c2cd64c405f8063970af6db33313b Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.0.norm1.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.2.attn1.to_out.0.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.attn1.to_out.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..52ef6829c33df1012f965f89fba27147c5038e64 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.attn1.to_out.0.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.2.ff.net.2.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.ff.net.2.bias new file mode 100644 index 0000000000000000000000000000000000000000..9f71b414aa811a8e39ee3b7e21315a0e3cffd606 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.ff.net.2.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.2.norm3.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.norm3.bias new file mode 100644 index 0000000000000000000000000000000000000000..f6cb5c8133aa13c4b0bbecfdd148da0c38413688 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.2.norm3.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.4.norm2.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.4.norm2.bias new file mode 100644 index 0000000000000000000000000000000000000000..fca8b739009a483a2bfe1dd1b7fc20344e1f61ff Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.4.norm2.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm1.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm1.weight new file mode 100644 index 
0000000000000000000000000000000000000000..53dae0318bba1cff8b78060d70e0598b3deaba2a Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm1.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm2.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm2.weight new file mode 100644 index 0000000000000000000000000000000000000000..394b212bdfd7aeae3a9d36b6fcedd1cea5deb7b6 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm2.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm3.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm3.weight new file mode 100644 index 0000000000000000000000000000000000000000..d2447357e042bd0a9db0743e43d787e596d02ad1 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.5.norm3.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.attn2.to_out.0.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.attn2.to_out.0.bias new file mode 100644 index 0000000000000000000000000000000000000000..6cd734e9f7e8934cc65348714fbd0ceb2dcddedb Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.attn2.to_out.0.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.0.proj.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.0.proj.bias new file mode 100644 index 0000000000000000000000000000000000000000..2615caf3f52b16c91833b98e71700dd6719ae666 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.0.proj.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.2.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.2.bias new file mode 100644 index 0000000000000000000000000000000000000000..8f3c6600bb54f3fc083999012fe21feec0287f45 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.ff.net.2.bias differ diff 
--git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm1.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm1.weight new file mode 100644 index 0000000000000000000000000000000000000000..146e7d3a80c6d2013b20af2872791c5109b0a191 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm1.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm2.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm2.bias new file mode 100644 index 0000000000000000000000000000000000000000..af4a88dff042560bf7adb532451e3f816eb0ea79 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm2.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm3.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm3.bias new file mode 100644 index 0000000000000000000000000000000000000000..7a6951682670c7711cbf7b77a705f42700c41f95 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.7.norm3.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm1.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm1.weight new file mode 100644 index 0000000000000000000000000000000000000000..6788a7e7d1f7f6e26f8f8453f673147aa2604bcd Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm1.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm3.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm3.bias new file mode 100644 index 0000000000000000000000000000000000000000..4a67aeb7e0a1b236f7495884ee125dfd3e7ebf76 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.8.norm3.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.attn1.to_out.0.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.attn1.to_out.0.bias new file mode 100644 index 
0000000000000000000000000000000000000000..42e1e6f3e68a2a59243f65867d52368800db58d3 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.attn1.to_out.0.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.bias new file mode 100644 index 0000000000000000000000000000000000000000..981c69ccd3af192ab782a6d33127693d9cee3ba5 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.weight new file mode 100644 index 0000000000000000000000000000000000000000..3e9740c907803862673ed15b5f53e5cc57c24615 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm1.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.bias new file mode 100644 index 0000000000000000000000000000000000000000..e4e7d7c8e9911a72e99e85b52346847d87fabdf5 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.bias differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.weight new file mode 100644 index 0000000000000000000000000000000000000000..4cd76d79c962cb7e1ef3ac3163a833a520a3910c Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm2.weight differ diff --git a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.bias b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.bias new file mode 100644 index 0000000000000000000000000000000000000000..16334f71c03fa6aa24cbe79225bd3a377c428d2a Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.bias differ diff --git 
a/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.weight b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.weight new file mode 100644 index 0000000000000000000000000000000000000000..08949c3ee51a6a84106821f49cc3c4776ba9a2d2 Binary files /dev/null and b/onnx/up_blocks.0/attentions.2.transformer_blocks.9.norm3.weight differ diff --git a/onnx/up_blocks.0/onnx__Add_6904 b/onnx/up_blocks.0/onnx__Add_6904 new file mode 100644 index 0000000000000000000000000000000000000000..4ee030fdfbbb1205c2bfc8119c54dc9fb52c7359 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_6904 differ diff --git a/onnx/up_blocks.0/onnx__Add_6906 b/onnx/up_blocks.0/onnx__Add_6906 new file mode 100644 index 0000000000000000000000000000000000000000..3213149b01c7fb6bdbb257717a74ff4c32cbf5bf Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_6906 differ diff --git a/onnx/up_blocks.0/onnx__Add_6908 b/onnx/up_blocks.0/onnx__Add_6908 new file mode 100644 index 0000000000000000000000000000000000000000..2936ef35c70dd3aa99f802621eaeb52c4dcedf97 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_6908 differ diff --git a/onnx/up_blocks.0/onnx__Add_7152 b/onnx/up_blocks.0/onnx__Add_7152 new file mode 100644 index 0000000000000000000000000000000000000000..e811b3efb6706f2fb4642574414d1716ab907458 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_7152 differ diff --git a/onnx/up_blocks.0/onnx__Add_7154 b/onnx/up_blocks.0/onnx__Add_7154 new file mode 100644 index 0000000000000000000000000000000000000000..dd8f84e2b9b6e3cbd885b319ab6d7682b89206e9 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_7154 differ diff --git a/onnx/up_blocks.0/onnx__Add_7400 b/onnx/up_blocks.0/onnx__Add_7400 new file mode 100644 index 0000000000000000000000000000000000000000..5c4c1425cc33c3124e2029c29ac77f5d3b135338 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_7400 differ diff --git a/onnx/up_blocks.0/onnx__Add_7404 b/onnx/up_blocks.0/onnx__Add_7404 new file mode 100644 index 
0000000000000000000000000000000000000000..628234a840b23f23dc98bd689754c2485b76cd93 Binary files /dev/null and b/onnx/up_blocks.0/onnx__Add_7404 differ diff --git a/onnx/up_blocks.0/onnx__Mul_6907 b/onnx/up_blocks.0/onnx__Mul_6907 new file mode 100644 index 0000000000000000000000000000000000000000..d3ffbe0dece541022bdee7eb493c22284a32c7d1 --- /dev/null +++ b/onnx/up_blocks.0/onnx__Mul_6907 @@ -0,0 +1,2 @@ +8@8o8p88888x838h8x888@888h888I8K8j8Y8R878\8R8H88=8[8I888R88888{888p88I988J8P8`88{88888h8J808b8o8:888O82888@858888r8!8j88[87a8[8*8R88W8H8!8Z8888R8P8b8B8Y88R8z88q88/8Q8,8888J88a8d8(9H8r8888h8R88q888j8?8"88H888r88h88x88\888b888H88888c88=8X8j96888K889898Q88j8j8T8h89888h8f8@8o808q8`8+88[8P88g8P88z8y888p8Q8Y8888o8\7:8y8o8Z8q888G8J887a8868)8u8y88o8898 8Q8888J8J828k8"8/838)8`8898Q8:888q8O8Q88[8A8898x87@8|8Q8b8b8@8C8 9a8{8@8W88b8G8J8k88888888J8a8x8)88@8m8Z887898858g8:8a8J8J8z88r8o8(9H888J888b898b8v888888b8888j8'8888J88g8Q88888a8Q8G88 8H88@8h8I88a8[8888?8@8T8q8X88x8H8X8888v898R88"8I8o88h8-8[888j8+8)8w888x88q8P888B8Q88887i8:8`888I828h8P8J8888h8r8x888"8388g8888x8h8P88`8X88r888k88b8a8:888s8I8z8y888H8k8P88888Z8888*8j8{89I8988r88Z88y88|89888s888a8888888k8888y8C8b8788p8y88C88C8888b8W88`8q88X88P88j8888 8X88*8@8[888888G888)8Q8 8888a8W87q888h88z88k88h88o8X8q868888X8G828q8)8D8P88 8y8888[89"988<88o88S8H8*8:88|8c8R88R8R8[88a89:88868k8R8*8L8o8x8H88x88b8I8888f8+88h868J8o8p87@8p88z808w8388X898:8Z8o8o888888@888H8P888p8D8x8R8/8q8j87o8[8h8@8z8898H8Z8X88@8W88y8w8a8a8878a88c8x886)88?888I98f88z8z8x888*8988888v88i8N88w8R8k8J8@88k8J8888k8@8x8Z8b8"88X8h88n7[8W8o87@8'888!8:88c8{8z8@8(9h8r8f888r8k8,8!838%8p8y8h88j8"8B8r8"8Z88o888:8z888868w8@88h8828u8v8W8 8@8:8X8R8x8a8988X8X88j8:88u8z888<8y8888o8888w8p8y8k8z8h8w8L8@8B8a8I88g88r8z8G88h88Y88(9J8o88v8 887a8X8:8b88e8^8q88D88X89808)88*8Q8o8[8U8898b88W88X88z88h8i8a88y8F8j88a88X8J8k88y9g888c8888r8z8|8x88888*888888o8m8P8888888889288a8Q8P8g8w8W8H8 
8b88q8c888y8o88W878@8h88J8P8x8@8?88a88j888Q8w88a8%8)8y88y8<8R8x88C8a8889J8h8u888k888"888h8y8888q8J8888888Q85i88)8=888k8838w88v88Z88`8J8Q8X88"888G8%8R8I88/88h8Z88R8I8788j8 +8b8!8)8`88y8@8H8{8G8888+8@8h88888:8z8g788r8S8888q8867P8888b8L888h8J8888I8Z8I88h8878C88;8"8y8a8W8Z888a8p8P88868C8Y8x888898g88@888y8888888r88888:{8R888B888888j8k88L888b888G888c88c8[8`988988v8r888X88z888y88b88z88q88888888:8p8:8j8w88o88o8o8g8j8R8z888Q8@88j888!8b8p88x8b888P8a8R8x888y888h8898h88:8g8I88w888X8y8:8"8q88z88u8k868r888188[8G88"88B8@8p8:8a8v888x88p88G8788@88f8J88 88S8 \ No newline at end of file diff --git a/onnx/up_blocks.0/onnx__Mul_7155 b/onnx/up_blocks.0/onnx__Mul_7155 new file mode 100644 index 0000000000000000000000000000000000000000..819dae504c2f0e43434b17fd68c8ab3a2d65bf60 --- /dev/null +++ b/onnx/up_blocks.0/onnx__Mul_7155 @@ -0,0 +1,9 @@ +989/9839=9b9y9k9J98[9898_898:99#9C9b9<9y999 9 9 99$939|9998R9|99 99:99-988W9z8J9)939B99899D98B98L999969B998-9U99r9I9l9J99[9D99*9(9999*988C9l889r9889949889<999-9p9:9t9:9q98v99j99499 98989#9k98[99 98d9I998G9899d99+989 9/989(99Y9K99929988{9839989J989|8J9>9599"9888939*99^9999K98w9989[9998j989[98e98889e999 9D9>99w9U9h9=9>9:988Z9b988.9d99>9z999999:9889889Y9k988898"98878888888"98L988]898988888888888888W688U99=999498898 +989B9 98889l998b9"989B9 9988989~988*9989899994^9999%98)9889 9M99M98-9:988888988[9839888888988978U9%9J98898888898%9/988U8-9888m88!988 +9w988,9888 +9j99S9788889888989=9398@98888d989.988#9898b988#98888/9998 98)9889884988-9m9+9889~9\98L98898888L9N9/98K99}9#96988888888.9C9788888b98898B98988U9.988889#98988998988<9898998888U9998:929889B998888988j9.99}9[999-99V9i9I9U9~9^98L9<9U9f99E99V9969;9999:998E9:9~9E9994:969B9>99E99+9.9@988<9+9+99988+98889#99+9*9999^9995999U9s9099 9 +99J98]88(989d99:98888}9989 98/99y9U9V9#9)9[9$92:9>98V98K998E9U9995989V999z99E9994998^9+99:9t9V99999d9]9s9<989#99f9l9988 9 99888888K9898i88788$89 9-98888888m988889>9899939*93989398+99889 988988}99:99{9+989898999889j9 98889888k999888M9 989888889 98898988999)9e98,9&9898z896999 
9/99889 9E989988898*99899888998999/9k99z9j99"9G9#9+988699+9398B9 9999]98L988869839886983998U98/99r9E98"9888z8898899888^9998988 98+9498998+9 98J9'99>98889:9J98888 988"9B9+988 9s98:9k98h98 98i99>9b98 +9899/999m999J9599888<98U988 99[98989 :;988&9j9G9 99 9:988.9898i8&9 99889 99699986988J989D998 9D98889U999899899,9N9w99988#98988889*9/9U99d99r9398898 9s98$9 +98-9{989899$9/9 988988 98999[9Y988:98898899:9898U98d998+98698H998888&98889 99-998-9 +99|99:999Z9889898r9398 988k99/998+98898m9 9Z99 +9k99m989898l98H98N98898 9U999b9%98D9K9[9 9b9>9499U99 98999B99889889*989,989*98G98B98K98Y888c88888 88868888889 \ No newline at end of file diff --git a/onnx/up_blocks.0/resnets.0.conv1.bias b/onnx/up_blocks.0/resnets.0.conv1.bias new file mode 100644 index 0000000000000000000000000000000000000000..7b6d7efb42e068473a4d60028f3ce43c5aaefab2 Binary files /dev/null and b/onnx/up_blocks.0/resnets.0.conv1.bias differ diff --git a/onnx/up_blocks.0/resnets.0.conv2.bias b/onnx/up_blocks.0/resnets.0.conv2.bias new file mode 100644 index 0000000000000000000000000000000000000000..689a2910b96d69f4a3b62c52e53e8b25cbf82b5e Binary files /dev/null and b/onnx/up_blocks.0/resnets.0.conv2.bias differ diff --git a/onnx/up_blocks.0/resnets.0.time_emb_proj.bias b/onnx/up_blocks.0/resnets.0.time_emb_proj.bias new file mode 100644 index 0000000000000000000000000000000000000000..ae25c67d95fa9a236c17a89393ece17fd6e47243 Binary files /dev/null and b/onnx/up_blocks.0/resnets.0.time_emb_proj.bias differ diff --git a/onnx/up_blocks.0/resnets.1.conv1.bias b/onnx/up_blocks.0/resnets.1.conv1.bias new file mode 100644 index 0000000000000000000000000000000000000000..04bcf6486457705def357b7d861ac7a797e4329e Binary files /dev/null and b/onnx/up_blocks.0/resnets.1.conv1.bias differ diff --git a/onnx/up_blocks.0/resnets.1.time_emb_proj.bias b/onnx/up_blocks.0/resnets.1.time_emb_proj.bias new file mode 100644 index 0000000000000000000000000000000000000000..c6871eba251cb3cfed972bf2be73911383eb94c5 Binary files /dev/null and 
b/onnx/up_blocks.0/resnets.1.time_emb_proj.bias differ diff --git a/onnx/up_blocks.0/resnets.2.conv2.bias b/onnx/up_blocks.0/resnets.2.conv2.bias new file mode 100644 index 0000000000000000000000000000000000000000..0093987efd5c65b1fcbdb58df51017adee3d6e8f Binary files /dev/null and b/onnx/up_blocks.0/resnets.2.conv2.bias differ diff --git a/onnx/up_blocks.0/resnets.2.conv_shortcut.bias b/onnx/up_blocks.0/resnets.2.conv_shortcut.bias new file mode 100644 index 0000000000000000000000000000000000000000..38d115bc6354443376ebec603a67b060338ea4c7 Binary files /dev/null and b/onnx/up_blocks.0/resnets.2.conv_shortcut.bias differ diff --git a/onnx/up_blocks.0/resnets.2.time_emb_proj.bias b/onnx/up_blocks.0/resnets.2.time_emb_proj.bias new file mode 100644 index 0000000000000000000000000000000000000000..1b1643c8ec1a850b5626b87250d9ca553033852f Binary files /dev/null and b/onnx/up_blocks.0/resnets.2.time_emb_proj.bias differ diff --git a/onnx/up_blocks.0/upsamplers.0.conv.bias b/onnx/up_blocks.0/upsamplers.0.conv.bias new file mode 100644 index 0000000000000000000000000000000000000000..a11abe507831e52cb30788627683439c0013cdf5 Binary files /dev/null and b/onnx/up_blocks.0/upsamplers.0.conv.bias differ diff --git a/src/assets/sdxl_cache.png b/src/assets/sdxl_cache.png new file mode 100644 index 0000000000000000000000000000000000000000..cc40b7f0331215eff229d3c5f8eb16607f587bdf Binary files /dev/null and b/src/assets/sdxl_cache.png differ diff --git a/src/cache_diffusion/cachify.py b/src/cache_diffusion/cachify.py new file mode 100644 index 0000000000000000000000000000000000000000..df5b3efbb222c69984c78f75ea84a6a3fb04dd16 --- /dev/null +++ b/src/cache_diffusion/cachify.py @@ -0,0 +1,144 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: MIT +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import fnmatch +from contextlib import contextmanager + +from diffusers.models.attention import BasicTransformerBlock, JointTransformerBlock +from diffusers.models.transformers.pixart_transformer_2d import PixArtTransformer2DModel +from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel +from diffusers.models.unets.unet_2d_blocks import ( + CrossAttnDownBlock2D, + CrossAttnUpBlock2D, + DownBlock2D, + UNetMidBlock2DCrossAttn, + UpBlock2D, +) +from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel +from diffusers.models.unets.unet_3d_blocks import ( + CrossAttnDownBlockSpatioTemporal, + CrossAttnUpBlockSpatioTemporal, + DownBlockSpatioTemporal, + UNetMidBlockSpatioTemporal, + UpBlockSpatioTemporal, +) +from diffusers.models.unets.unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel + +from .module import CachedModule +from .utils import replace_module + +CACHED_PIPE = { + UNet2DConditionModel: ( + DownBlock2D, + CrossAttnDownBlock2D, + UNetMidBlock2DCrossAttn, + CrossAttnUpBlock2D, + UpBlock2D, + ), + PixArtTransformer2DModel: (BasicTransformerBlock), + UNetSpatioTemporalConditionModel: ( + CrossAttnDownBlockSpatioTemporal, + DownBlockSpatioTemporal, + UpBlockSpatioTemporal, + CrossAttnUpBlockSpatioTemporal, + UNetMidBlockSpatioTemporal, + ), + SD3Transformer2DModel: (JointTransformerBlock), +} + + +def _apply_to_modules(model, action, modules=None, config_list=None): + if hasattr(model, "use_trt_infer") and model.use_trt_infer: + for key, module in model.engines.items(): + if isinstance(module, CachedModule): + action(module) + elif config_list: + for config in config_list: + if _pass(key, config["wildcard_or_filter_func"]): + model.engines[key] = CachedModule(module, config["select_cache_step_func"]) + else: + for name, module in model.named_modules(): + if isinstance(module, CachedModule): + action(module) + elif modules and config_list: + for config in config_list: + if _pass(name, 
config["wildcard_or_filter_func"]) and isinstance( + module, modules + ): + replace_module( + model, + name, + CachedModule(module, config["select_cache_step_func"]), + ) + + +def cachify(model, config_list, modules): + def cache_action(module): + pass # No action needed, caching is handled in the loop itself + + _apply_to_modules(model, cache_action, modules, config_list) + + +def disable(pipe): + model = get_model(pipe) + _apply_to_modules(model, lambda module: module.disable_cache()) + + +def enable(pipe): + model = get_model(pipe) + _apply_to_modules(model, lambda module: module.enable_cache()) + + +def reset_status(pipe): + model = get_model(pipe) + _apply_to_modules(model, lambda module: setattr(module, "cur_step", 0)) + + +def _pass(name, wildcard_or_filter_func): + if isinstance(wildcard_or_filter_func, str): + return fnmatch.fnmatch(name, wildcard_or_filter_func) + elif callable(wildcard_or_filter_func): + return wildcard_or_filter_func(name) + else: + raise NotImplementedError(f"Unsupported type {type(wildcard_or_filter_func)}") + + +def get_model(pipe): + if hasattr(pipe, "unet"): + return pipe.unet + elif hasattr(pipe, "transformer"): + return pipe.transformer + else: + raise KeyError + + +@contextmanager +def infer(pipe): + try: + yield pipe + finally: + reset_status(pipe) + + +def prepare(pipe, config_list): + model = get_model(pipe) + assert model.__class__ in CACHED_PIPE.keys(), f"{model.__class__} is not supported!" + cachify(model, config_list, CACHED_PIPE[model.__class__]) diff --git a/src/cache_diffusion/module.py b/src/cache_diffusion/module.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ed434c30fd1ab1feeeafc6addeb73dd04655c4 --- /dev/null +++ b/src/cache_diffusion/module.py @@ -0,0 +1,55 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: MIT +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +from torch import nn + + +class CachedModule(nn.Module): + def __init__(self, block, select_cache_step_func) -> None: + super().__init__() + self.block = block + self.select_cache_step_func = select_cache_step_func + self.cur_step = 0 + self.cached_results = None + self.enabled = True + + def __getattr__(self, name): + try: + return super().__getattr__(name) + except AttributeError: + return getattr(self.block, name) + + def if_cache(self): + return self.select_cache_step_func(self.cur_step) and self.enabled + + def enable_cache(self): + self.enabled = True + + def disable_cache(self): + self.enabled = False + self.cur_step = 0 + + def forward(self, *args, **kwargs): + if not self.if_cache(): + self.cached_results = self.block(*args, **kwargs) + if self.enabled: + self.cur_step += 1 + return self.cached_results diff --git a/src/cache_diffusion/utils.py b/src/cache_diffusion/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b2c27726367f7961e55f215ca6f064353dc8bd --- /dev/null +++ b/src/cache_diffusion/utils.py @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: MIT +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
import re

import torch

# Per-model caching defaults: "wildcard_or_filter_func" decides which named
# sub-blocks get wrapped in CachedModule; "select_cache_step_func" decides on
# which denoising steps a wrapped block reuses its cached output.
SDXL_DEFAULT_CONFIG = [
    {
        "wildcard_or_filter_func": lambda name: "up_blocks.2" not in name,
        "select_cache_step_func": lambda step: (step % 2) != 0,
    }
]

PIXART_DEFAULT_CONFIG = [
    {
        "wildcard_or_filter_func": lambda name: not re.search(
            r"transformer_blocks\.(2[1-7])\.", name
        ),
        "select_cache_step_func": lambda step: (step % 3) != 0,
    }
]

SVD_DEFAULT_CONFIG = [
    {
        "wildcard_or_filter_func": lambda name: "up_blocks.3" not in name,
        "select_cache_step_func": lambda step: (step % 2) != 0,
    }
]

SD3_DEFAULT_CONFIG = [
    {
        "wildcard_or_filter_func": lambda name: re.search(
            r"^((?!transformer_blocks\.(1[6-9]|2[0-3])).)*$", name
        ),
        "select_cache_step_func": lambda step: (step % 2) != 0,
    }
]


def replace_module(parent, name_path, new_module):
    """Replace the attribute at dotted ``name_path`` under ``parent``.

    Walks every path component but the last with getattr, then assigns
    ``new_module`` onto the final attribute.
    """
    *ancestors, leaf = name_path.split(".")
    for part in ancestors:
        parent = getattr(parent, part)
    setattr(parent, leaf, new_module)


class LossSchedulerModel(torch.nn.Module):
    """Learned linear combiner over the sampling trajectory.

    At step ``t`` it predicts::

        out = wx[t] * x_T + sum_i we[t, i] * eps_i

    where ``x_T`` is the initial latent and ``eps_i`` are all model outputs
    collected so far.  ``wx`` is 1-D (one weight per step); ``we`` is a square
    (steps x steps) matrix.
    """

    def __init__(self, wx, we):
        super().__init__()
        assert len(wx.shape) == 1 and len(we.shape) == 2
        num_steps = wx.shape[0]
        assert num_steps == we.shape[0] and num_steps == we.shape[1]
        self.register_parameter("wx", torch.nn.Parameter(wx))
        self.register_parameter("we", torch.nn.Parameter(we))

    def forward(self, t, xT, e_prev):
        # Exactly t + 1 model outputs must have been collected by step t.
        assert t - len(e_prev) + 1 == 0
        combined = xT * self.wx[t]
        for eps, weight in zip(e_prev, self.we[t]):
            combined = combined + eps * weight
        # Cast back to the latent's dtype (weights may be a wider dtype).
        return combined.to(xT.dtype)


class LossScheduler:
    """Drop-in diffusers-style scheduler driven by a LossSchedulerModel."""

    def __init__(self, timesteps, model):
        self.timesteps = timesteps
        self.model = model
        self.init_noise_sigma = 1.0
        self.order = 1

    @staticmethod
    def load(path):
        """Build a scheduler from (timesteps, wx, we) saved by ``save``."""
        timesteps, wx, we = torch.load(path, map_location="cpu")
        return LossScheduler(timesteps, LossSchedulerModel(wx, we))

    def save(self, path):
        torch.save((self.timesteps, self.model.wx, self.model.we), path)

    def set_timesteps(self, num_inference_steps, device="cuda"):
        # ``num_inference_steps`` is accepted for interface compatibility only;
        # the trajectory length is fixed by ``self.timesteps``.
        self.xT = None
        self.e_prev = []
        self.t_prev = -1
        self.model = self.model.to(device)
        self.timesteps = self.timesteps.to(device)

    def scale_model_input(self, sample, *args, **kwargs):
        return sample

    @torch.no_grad()
    def step(self, model_output, timestep, sample, *args, **kwargs):
        """Advance one denoising step; returns a 1-tuple like diffusers."""
        idx = self.timesteps.tolist().index(timestep)
        # Steps must be consumed strictly in order.
        assert self.t_prev == -1 or idx == self.t_prev + 1
        if self.t_prev == -1:
            self.xT = sample
        self.e_prev.append(model_output)
        denoised = self.model(idx, self.xT, self.e_prev)
        if idx + 1 == len(self.timesteps):
            # Trajectory finished: reset accumulated state for the next run.
            self.xT = None
            self.e_prev = []
            self.t_prev = -1
        else:
            self.t_prev = idx
        return (denoised,)


class SchedulerWrapper:
    """Wraps a real scheduler to record (x, eps, x') triples per timestep,
    then optionally swaps in a trained LossScheduler for fast sampling."""

    def __init__(self, scheduler, loss_params_path="loss_params.pth"):
        self.scheduler = scheduler
        self.catch_x, self.catch_e, self.catch_x_ = {}, {}, {}
        self.loss_scheduler = None
        self.loss_params_path = loss_params_path

    def set_timesteps(self, num_inference_steps, **kwargs):
        if self.loss_scheduler is None:
            result = self.scheduler.set_timesteps(num_inference_steps, **kwargs)
            self.timesteps = self.scheduler.timesteps
        else:
            result = self.loss_scheduler.set_timesteps(num_inference_steps, **kwargs)
            self.timesteps = self.loss_scheduler.timesteps
        # Noise sigma and order always mirror the real scheduler.
        self.init_noise_sigma = self.scheduler.init_noise_sigma
        self.order = self.scheduler.order
        return result

    def step(self, model_output, timestep, sample, **kwargs):
        if self.loss_scheduler is None:
            # Record the trajectory while delegating to the real scheduler.
            result = self.scheduler.step(model_output, timestep, sample, **kwargs)
            t = timestep.tolist()
            if t not in self.catch_x:
                self.catch_x[t] = []
                self.catch_e[t] = []
                self.catch_x_[t] = []
            self.catch_x[t].append(sample.clone().detach().cpu())
            self.catch_e[t].append(model_output.clone().detach().cpu())
            self.catch_x_[t].append(result[0].clone().detach().cpu())
            return result
        return self.loss_scheduler.step(model_output, timestep, sample, **kwargs)

    def scale_model_input(self, sample, timestep):
        return sample

    def add_noise(self, original_samples, noise, timesteps):
        return self.scheduler.add_noise(original_samples, noise, timesteps)

    def get_path(self):
        """Return (timesteps desc, stacked x per step + final x', stacked eps)."""
        timesteps = sorted(self.catch_x, reverse=True)
        xs, es = [], []
        for t in timesteps:
            xs.append(torch.cat(self.catch_x[t], dim=0))
            es.append(torch.cat(self.catch_e[t], dim=0))
        # Append the post-step latent of the last (smallest) timestep.
        last_t = timesteps[-1]
        xs.append(torch.cat(self.catch_x_[last_t], dim=0))
        return (
            torch.tensor(timesteps, dtype=torch.int32),
            torch.stack(xs),
            torch.stack(es),
        )

    def load_loss_params(self):
        timesteps, wx, we = torch.load(self.loss_params_path, map_location="cpu")
        self.loss_model = LossSchedulerModel(wx, we)
        self.loss_scheduler = LossScheduler(timesteps, self.loss_model)

    def prepare_loss(self, num_accelerate_steps=15):
        # ``num_accelerate_steps`` kept for interface compatibility; the
        # loaded parameter file fully determines the accelerated schedule.
        self.load_loss_params()
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel


def _dynamic_axes(input_names):
    """Axis 0 is dynamic for every input: batch for tensors, steps for temb."""
    return {
        name: {0: "steps" if name == "temb" else "batch_size"}
        for name in input_names
    }


def _block_config(dummy_input, output_names):
    """Assemble one ONNX-export entry from its dummy shapes and output names."""
    return {
        "dummy_input": dummy_input,
        "output_names": output_names,
        "dynamic_axes": _dynamic_axes(dummy_input),
    }


sd3_common_transformer_block_config = _block_config(
    {
        "hidden_states": (2, 4096, 1536),
        "encoder_hidden_states": (2, 333, 1536),
        "temb": (2, 1536),
    },
    ["encoder_hidden_states_out", "hidden_states_out"],
)

# Per-backbone export specs: dummy shapes, output names, and dynamic axes for
# every sub-block that gets exported to ONNX / compiled to TensorRT.
ONNX_CONFIG = {
    UNet2DConditionModel: {
        "down_blocks.0": _block_config(
            {
                "hidden_states": (2, 320, 128, 128),
                "temb": (2, 1280),
            },
            ["sample", "res_samples_0", "res_samples_1", "res_samples_2"],
        ),
        "down_blocks.1": _block_config(
            {
                "hidden_states": (2, 320, 64, 64),
                "temb": (2, 1280),
                "encoder_hidden_states": (2, 77, 2048),
            },
            ["sample", "res_samples_0", "res_samples_1", "res_samples_2"],
        ),
        "down_blocks.2": _block_config(
            {
                "hidden_states": (2, 640, 32, 32),
                "temb": (2, 1280),
                "encoder_hidden_states": (2, 77, 2048),
            },
            ["sample", "res_samples_0", "res_samples_1"],
        ),
        "mid_block": _block_config(
            {
                "hidden_states": (2, 1280, 32, 32),
                "temb": (2, 1280),
                "encoder_hidden_states": (2, 77, 2048),
            },
            ["sample"],
        ),
        "up_blocks.0": _block_config(
            {
                "hidden_states": (2, 1280, 32, 32),
                "res_hidden_states_0": (2, 640, 32, 32),
                "res_hidden_states_1": (2, 1280, 32, 32),
                "res_hidden_states_2": (2, 1280, 32, 32),
                "temb": (2, 1280),
                "encoder_hidden_states": (2, 77, 2048),
            },
            ["sample"],
        ),
        "up_blocks.1": _block_config(
            {
                "hidden_states": (2, 1280, 64, 64),
                "res_hidden_states_0": (2, 320, 64, 64),
                "res_hidden_states_1": (2, 640, 64, 64),
                "res_hidden_states_2": (2, 640, 64, 64),
                "temb": (2, 1280),
                "encoder_hidden_states": (2, 77, 2048),
            },
            ["sample"],
        ),
        "up_blocks.2": _block_config(
            {
                "hidden_states": (2, 640, 128, 128),
                "res_hidden_states_0": (2, 320, 128, 128),
                "res_hidden_states_1": (2, 320, 128, 128),
                "res_hidden_states_2": (2, 320, 128, 128),
                "temb": (2, 1280),
            },
            ["sample"],
        ),
    },
    SD3Transformer2DModel: {
        # Blocks 0..22 share one config; the final block drops the text stream.
        **{f"transformer_blocks.{i}": sd3_common_transformer_block_config for i in range(23)},
        "transformer_blocks.23": _block_config(
            {
                "hidden_states": (2, 4096, 1536),
                "encoder_hidden_states": (2, 333, 1536),
                "temb": (2, 1536),
            },
            ["hidden_states_out"],
        ),
    },
}
import types
from pathlib import Path
from typing import Optional

import tensorrt as trt
import torch
from cache_diffusion.cachify import CACHED_PIPE, get_model
from cuda import cudart
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from pipe.config import ONNX_CONFIG
from pipe.models.sd3 import sd3_forward
from pipe.models.sdxl import (
    cachecrossattnupblock2d_forward,
    cacheunet_forward,
    cacheupblock2d_forward,
)
from polygraphy.backend.trt import (
    CreateConfig,
    Profile,
    engine_from_network,
    network_from_onnx_path,
    save_engine,
)
from torch.onnx import export as onnx_export

from .utils import Engine


def replace_new_forward(backbone):
    """Swap export-friendly forward() implementations onto the backbone.

    The replacement forwards take flattened tensor arguments (no tuples) so
    each sub-block can be exported to ONNX and run as a TensorRT engine.
    """
    if backbone.__class__ == UNet2DConditionModel:
        backbone.forward = types.MethodType(cacheunet_forward, backbone)
        for upsample_block in backbone.up_blocks:
            if (
                hasattr(upsample_block, "has_cross_attention")
                and upsample_block.has_cross_attention
            ):
                upsample_block.forward = types.MethodType(
                    cachecrossattnupblock2d_forward, upsample_block
                )
            else:
                upsample_block.forward = types.MethodType(cacheupblock2d_forward, upsample_block)
    elif backbone.__class__ == SD3Transformer2DModel:
        backbone.forward = types.MethodType(sd3_forward, backbone)


def get_input_info(dummy_dict, info: Optional[str] = None, batch_size: int = 1):
    """Flatten a (possibly nested) dummy-input spec into the requested view.

    ``info`` selects the view:
      - "profile_shapes":      list of (name, shape) pairs
      - "profile_shapes_dict": {name: shape}
      - "dummy_input":         {name: half-precision CUDA tensor of ones}
      - "input_names":         list of names

    The leading (batch) dimension of every shape is scaled by ``batch_size``.
    """
    return_val = [] if info in ("profile_shapes", "input_names") else {}

    def collect_leaf_keys(d):
        for key, value in d.items():
            if isinstance(value, dict):
                collect_leaf_keys(value)
                continue
            value = (value[0] * batch_size,) + value[1:]
            if info == "profile_shapes":
                return_val.append((key, value))  # type: ignore
            elif info == "profile_shapes_dict":
                return_val[key] = value  # type: ignore
            elif info == "dummy_input":
                return_val[key] = torch.ones(value).half().cuda()  # type: ignore
            elif info == "input_names":
                return_val.append(key)  # type: ignore

    collect_leaf_keys(dummy_dict)
    return return_val


# NOTE: name misspells "compile" but is kept for backward compatibility with
# existing callers/importers.
def complie2trt(cls, onnx_path: Path, engine_path: Path, batch_size: int = 1):
    """Build a TensorRT .plan engine for each exported ONNX sub-block of ``cls``.

    Skips sub-directories without an ONNX_CONFIG entry and engines that have
    already been built.
    """
    subdirs = [f for f in onnx_path.iterdir() if f.is_dir()]
    for subdir in subdirs:
        if subdir.name not in ONNX_CONFIG[cls].keys():
            continue
        model_path = subdir / "model.onnx"
        plan_path = engine_path / f"{subdir.name}.plan"
        if not plan_path.exists():
            print(f"Building {str(model_path)}")
            build_profile = Profile()
            profile_shapes = get_input_info(
                ONNX_CONFIG[cls][subdir.name]["dummy_input"], "profile_shapes", batch_size
            )
            for input_name, input_shape in profile_shapes:
                # Minimum batch is 2 (CFG pairs); opt == max == profiled shape.
                min_input_shape = (2,) + input_shape[1:]
                build_profile.add(input_name, min_input_shape, input_shape, input_shape)
            block_network = network_from_onnx_path(
                str(model_path), flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM], strongly_typed=True
            )
            build_config = CreateConfig(
                builder_optimization_level=6,
                tf32=True,
                #bf16=True,
                profiles=[build_profile],
            )
            engine = engine_from_network(
                block_network,
                config=build_config,
            )
            save_engine(engine, path=plan_path)
        else:
            print(f"{str(model_path)} already exists!")


def get_total_device_memory(backbone):
    """Largest per-engine activation memory, so one shared buffer fits all."""
    max_device_memory = 0
    for _, engine in backbone.engines.items():
        max_device_memory = max(max_device_memory, engine.engine.device_memory_size)
    return max_device_memory
def load_engines(backbone, engine_path: Path, batch_size: int = 1):
    """Load every .plan file under ``engine_path``, activate all engines on a
    single shared device-memory block, and pre-allocate I/O buffers."""
    backbone.engines = {}
    for f in engine_path.iterdir():
        if f.is_file():
            eng = Engine()
            eng.load(str(f))
            backbone.engines[f"{f.stem}"] = eng
    # One allocation sized for the hungriest engine is shared by all of them.
    _, shared_device_memory = cudart.cudaMalloc(get_total_device_memory(backbone))
    for engine in backbone.engines.values():
        engine.activate(shared_device_memory)
    backbone.cuda_stream = cudart.cudaStreamCreate()[1]
    for block_name, engine in backbone.engines.items():
        engine.allocate_buffers(
            shape_dict=get_input_info(
                ONNX_CONFIG[backbone.__class__][block_name]["dummy_input"],
                "profile_shapes_dict",
                batch_size,
            ),
            device=backbone.device,
            batch_size=batch_size,
        )
    # TODO: Free and clean up the origin pytorch cuda memory


def export_onnx(backbone, onnx_path: Path):
    """Export every cached sub-block of ``backbone`` to ONNX, one directory
    per block, skipping blocks that were already exported."""
    for name, module in backbone.named_modules():
        if isinstance(module, CACHED_PIPE[backbone.__class__]):
            _onnx_dir = onnx_path.joinpath(f"{name}")
            _onnx_file = _onnx_dir.joinpath("model.onnx")
            if not _onnx_file.exists():
                _onnx_dir.mkdir(parents=True, exist_ok=True)
                dummy_input = get_input_info(
                    ONNX_CONFIG[backbone.__class__][f"{name}"]["dummy_input"], "dummy_input"
                )
                input_names = get_input_info(
                    ONNX_CONFIG[backbone.__class__][f"{name}"]["dummy_input"], "input_names"
                )
                output_names = ONNX_CONFIG[backbone.__class__][f"{name}"]["output_names"]
                onnx_export(
                    module,
                    args=dummy_input,
                    f=_onnx_file.as_posix(),
                    input_names=input_names,
                    output_names=output_names,
                    dynamic_axes=ONNX_CONFIG[backbone.__class__][f"{name}"]["dynamic_axes"],
                    do_constant_folding=True,
                    opset_version=17,
                )
            else:
                print(f"{str(_onnx_file)} already exists!")


def warm_up(backbone, batch_size: int = 1):
    """Run each engine once with its dummy input to trigger lazy init."""
    print("Warming-up TensorRT engines...")
    for name, engine in backbone.engines.items():
        dummy_input = get_input_info(
            ONNX_CONFIG[backbone.__class__][name]["dummy_input"], "dummy_input", batch_size
        )
        _ = engine(dummy_input, backbone.cuda_stream)


def teardown(pipe):
    """Release the TensorRT engines and the shared CUDA stream."""
    backbone = get_model(pipe)
    # Fix: deleting the loop variable only unbound a local name and released
    # nothing.  Clearing the dict drops the references so Engine.__del__ runs.
    backbone.engines.clear()
    cudart.cudaStreamDestroy(backbone.cuda_stream)
    del backbone.cuda_stream


# NOTE: shadows the builtin ``compile``; name kept as it is the module's
# public entry point.
def compile(pipe, onnx_path: Path, engine_path: Path, batch_size: int = 1):
    """Full pipeline: patch forwards, export ONNX, build TRT engines, load
    them onto the backbone, and warm them up."""
    backbone = get_model(pipe)
    onnx_path.mkdir(parents=True, exist_ok=True)
    engine_path.mkdir(parents=True, exist_ok=True)

    replace_new_forward(backbone)
    export_onnx(backbone, onnx_path)
    complie2trt(backbone.__class__, onnx_path, engine_path, batch_size)
    load_engines(backbone, engine_path, batch_size)
    warm_up(backbone, batch_size)
    backbone.use_trt_infer = True
from typing import Any, Dict, List, Optional, Union

import torch
from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.utils import (
    USE_PEFT_BACKEND,
    is_torch_version,
    scale_lora_layers,
    unscale_lora_layers,
)


def sd3_forward(
    self,
    hidden_states: torch.FloatTensor,
    encoder_hidden_states: torch.FloatTensor = None,
    pooled_projections: torch.FloatTensor = None,
    timestep: torch.LongTensor = None,
    block_controlnet_hidden_states: List = None,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    return_dict: bool = True,
) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
    """[`SD3Transformer2DModel`] forward with an optional TensorRT fast path.

    Args:
        hidden_states (`torch.FloatTensor` of shape `(batch, channel, height, width)`):
            Input latent.
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
            Conditional (prompt) embeddings.
        pooled_projections (`torch.FloatTensor` of shape `(batch, projection_dim)`):
            Pooled condition embeddings.
        timestep (`torch.LongTensor`): Denoising step indicator.
        block_controlnet_hidden_states (`list` of `torch.Tensor`):
            Optional residuals added to transformer-block outputs.
        joint_attention_kwargs (`dict`, *optional*):
            Passed to the attention processors; `"scale"` controls LoRA.
        return_dict (`bool`, defaults to `True`):
            Return a [`Transformer2DModelOutput`] instead of a plain tuple.

    Returns:
        A [`Transformer2DModelOutput`] when `return_dict` is True, otherwise a
        tuple whose first element is the sample tensor.
    """
    if joint_attention_kwargs is not None:
        joint_attention_kwargs = joint_attention_kwargs.copy()
        lora_scale = joint_attention_kwargs.pop("scale", 1.0)
    else:
        lora_scale = 1.0

    if USE_PEFT_BACKEND:
        # weight the lora layers by setting `lora_scale` for each PEFT layer
        scale_lora_layers(self, lora_scale)

    height, width = hidden_states.shape[-2:]

    hidden_states = self.pos_embed(hidden_states)  # adds positional embeddings too
    temb = self.time_text_embed(timestep, pooled_projections)
    encoder_hidden_states = self.context_embedder(encoder_hidden_states)

    # Fix: derive the last block index instead of hard-coding 23 so the TRT
    # path also works for SD3 variants with a different depth.  The final
    # block drops the text stream, so its engine has no encoder output.
    last_block_index = len(self.transformer_blocks) - 1

    for index_block, block in enumerate(self.transformer_blocks):
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    return module(*inputs)

                return custom_forward

            ckpt_kwargs: Dict[str, Any] = (
                {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
            )
            encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(block),
                hidden_states,
                encoder_hidden_states,
                temb,
                **ckpt_kwargs,
            )
        elif hasattr(self, "use_trt_infer") and self.use_trt_infer:
            feed_dict = {
                "hidden_states": hidden_states,
                "encoder_hidden_states": encoder_hidden_states,
                "temb": temb,
            }
            _results = self.engines[f"transformer_blocks.{index_block}"](
                feed_dict, self.cuda_stream
            )
            if index_block != last_block_index:
                encoder_hidden_states = _results["encoder_hidden_states_out"]
            hidden_states = _results["hidden_states_out"]
        else:
            encoder_hidden_states, hidden_states = block(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                temb=temb,
            )

        # controlnet residual
        if block_controlnet_hidden_states is not None and block.context_pre_only is False:
            interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states)
            hidden_states = (
                hidden_states + block_controlnet_hidden_states[index_block // interval_control]
            )

    hidden_states = self.norm_out(hidden_states, temb)
    hidden_states = self.proj_out(hidden_states)

    # unpatchify
    patch_size = self.config.patch_size
    height = height // patch_size
    width = width // patch_size

    hidden_states = hidden_states.reshape(
        shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
    )
    hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
    output = hidden_states.reshape(
        shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
    )

    if USE_PEFT_BACKEND:
        # remove `lora_scale` from each PEFT layer
        unscale_lora_layers(self, lora_scale)

    if not return_dict:
        return (output,)

    return Transformer2DModelOutput(sample=output)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Not a contribution +# Changes made by NVIDIA CORPORATION & AFFILIATES or otherwise documented as +# NVIDIA-proprietary are not a contribution and subject to the following terms and conditions: +# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
from typing import Any, Dict, Optional, Tuple, Union

import torch
from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput


def cachecrossattnupblock2d_forward(
    self,
    hidden_states: torch.FloatTensor,
    res_hidden_states_0: torch.FloatTensor,
    res_hidden_states_1: torch.FloatTensor,
    res_hidden_states_2: torch.FloatTensor,
    temb: Optional[torch.FloatTensor] = None,
    encoder_hidden_states: Optional[torch.FloatTensor] = None,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    upsample_size: Optional[int] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    encoder_attention_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    """CrossAttnUpBlock2D forward with the skip tuple flattened into three
    explicit tensor arguments, so the block is ONNX/TensorRT exportable."""
    # Skips are consumed last-first, mirroring the original tuple pops.
    skip_connections = [res_hidden_states_0, res_hidden_states_1, res_hidden_states_2]
    for resnet, attn in zip(self.resnets, self.attentions):
        skip = skip_connections.pop()
        hidden_states = torch.cat([hidden_states, skip], dim=1)
        hidden_states = resnet(hidden_states, temb)
        hidden_states = attn(
            hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            cross_attention_kwargs=cross_attention_kwargs,
            attention_mask=attention_mask,
            encoder_attention_mask=encoder_attention_mask,
            return_dict=False,
        )[0]

    if self.upsamplers is not None:
        for upsampler in self.upsamplers:
            hidden_states = upsampler(hidden_states, upsample_size)

    return hidden_states
def cacheupblock2d_forward(
    self,
    hidden_states: torch.FloatTensor,
    res_hidden_states_0: torch.FloatTensor,
    res_hidden_states_1: torch.FloatTensor,
    res_hidden_states_2: torch.FloatTensor,
    temb: Optional[torch.FloatTensor] = None,
    upsample_size: Optional[int] = None,
) -> torch.FloatTensor:
    """UpBlock2D forward with the skip tuple flattened into three explicit
    tensor arguments, so the block is ONNX/TensorRT exportable."""
    res_hidden_states_tuple = (res_hidden_states_0, res_hidden_states_1, res_hidden_states_2)
    for resnet in self.resnets:
        # Consume the skip connections last-first.
        res_hidden_states = res_hidden_states_tuple[-1]
        res_hidden_states_tuple = res_hidden_states_tuple[:-1]

        hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
        hidden_states = resnet(hidden_states, temb)

    if self.upsamplers is not None:
        for upsampler in self.upsamplers:
            hidden_states = upsampler(hidden_states, upsample_size)

    return hidden_states


def cacheunet_forward(
    self,
    sample: torch.FloatTensor,
    timestep: Union[torch.Tensor, float, int],
    encoder_hidden_states: torch.Tensor,
    class_labels: Optional[torch.Tensor] = None,
    timestep_cond: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
    mid_block_additional_residual: Optional[torch.Tensor] = None,
    down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    return_dict: bool = True,
) -> "Union[UNet2DConditionOutput, Tuple]":
    """UNet2DConditionModel forward that can route each sub-block through a
    pre-built TensorRT engine (``self.use_trt_infer``) or fall back to the
    regular PyTorch modules."""
    # Hoisted once: the attribute does not change during a forward pass.
    use_trt = hasattr(self, "use_trt_infer") and self.use_trt_infer

    # 1. time
    t_emb = self.get_time_embed(sample=sample, timestep=timestep)
    emb = self.time_embedding(t_emb, timestep_cond)

    aug_emb = self.get_aug_embed(
        emb=emb,
        encoder_hidden_states=encoder_hidden_states,
        added_cond_kwargs=added_cond_kwargs,
    )
    emb = emb + aug_emb if aug_emb is not None else emb

    encoder_hidden_states = self.process_encoder_hidden_states(
        encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
    )

    # 2. pre-process
    sample = self.conv_in(sample)

    if hasattr(self, "_export_precess_onnx") and self._export_precess_onnx:
        # Export hook: return the pre-processed triple for the ONNX graph.
        return (
            sample,
            encoder_hidden_states,
            emb,
        )

    # 3. down
    down_block_res_samples = (sample,)
    for i, downsample_block in enumerate(self.down_blocks):
        has_cross = (
            hasattr(downsample_block, "has_cross_attention")
            and downsample_block.has_cross_attention
        )
        if use_trt:
            feed_dict = {"hidden_states": sample, "temb": emb}
            if has_cross:
                feed_dict["encoder_hidden_states"] = encoder_hidden_states
            down_results = self.engines[f"down_blocks.{i}"](feed_dict, self.cuda_stream)
            sample = down_results["sample"]
            # Append residuals directly; down_blocks.0/1 emit three, .2 two.
            down_block_res_samples += (
                down_results["res_samples_0"],
                down_results["res_samples_1"],
            )
            if "res_samples_2" in down_results:
                down_block_res_samples += (down_results["res_samples_2"],)
        elif has_cross:
            # For t2i-adapter CrossAttnDownBlock2D
            additional_residuals = {}
            sample, res_samples = downsample_block(
                hidden_states=sample,
                temb=emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                encoder_attention_mask=encoder_attention_mask,
                **additional_residuals,
            )
            down_block_res_samples += res_samples
        else:
            sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
            down_block_res_samples += res_samples

    # 4. mid
    if use_trt:
        feed_dict = {
            "hidden_states": sample,
            "temb": emb,
            "encoder_hidden_states": encoder_hidden_states,
        }
        sample = self.engines["mid_block"](feed_dict, self.cuda_stream)["sample"]
    else:
        sample = self.mid_block(
            sample,
            emb,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
            cross_attention_kwargs=cross_attention_kwargs,
            encoder_attention_mask=encoder_attention_mask,
        )

    # 5. up
    for i, upsample_block in enumerate(self.up_blocks):
        res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
        down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

        has_cross = (
            hasattr(upsample_block, "has_cross_attention")
            and upsample_block.has_cross_attention
        )
        if use_trt:
            feed_dict = {
                "hidden_states": sample,
                "res_hidden_states_0": res_samples[0],
                "res_hidden_states_1": res_samples[1],
                "res_hidden_states_2": res_samples[2],
                "temb": emb,
            }
            if has_cross:
                feed_dict["encoder_hidden_states"] = encoder_hidden_states
            sample = self.engines[f"up_blocks.{i}"](feed_dict, self.cuda_stream)["sample"]
        elif has_cross:
            sample = upsample_block(
                hidden_states=sample,
                temb=emb,
                res_hidden_states_0=res_samples[0],
                res_hidden_states_1=res_samples[1],
                res_hidden_states_2=res_samples[2],
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                attention_mask=attention_mask,
                encoder_attention_mask=encoder_attention_mask,
            )
        else:
            sample = upsample_block(
                hidden_states=sample,
                temb=emb,
                res_hidden_states_0=res_samples[0],
                res_hidden_states_1=res_samples[1],
                res_hidden_states_2=res_samples[2],
            )

    # 6. post-process
    if self.conv_norm_out:
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
    sample = self.conv_out(sample)

    if not return_dict:
        return (sample,)

    return UNet2DConditionOutput(sample=sample)
from collections import OrderedDict

import numpy as np
import tensorrt as trt
import torch
from cuda import cudart
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import engine_from_bytes

# TensorRT reports tensor dtypes as numpy scalar types; this table translates
# them into the torch dtypes used when allocating the device-side I/O buffers.
numpy_to_torch_dtype_dict = {
    np.uint8: torch.uint8,
    np.int8: torch.int8,
    np.int16: torch.int16,
    np.int32: torch.int32,
    np.int64: torch.int64,
    np.float16: torch.float16,
    np.float32: torch.float32,
    np.float64: torch.float64,
    np.complex64: torch.complex64,
    np.complex128: torch.complex128,
}


class Engine:
    """Wrapper around a deserialized TensorRT engine.

    Owns the execution context, the pre-allocated device-resident I/O tensors,
    and (optionally) a captured CUDA graph for low-overhead replay.
    """

    def __init__(self):
        self.engine = None
        self.context = None
        self.buffers = OrderedDict()
        self.tensors = OrderedDict()
        # Captured lazily on the first __call__ with use_cuda_graph=True.
        self.cuda_graph_instance = None
        # Metadata flag set by callers; not read inside this class.
        self.has_cross_attention = False

    def __del__(self):
        # Drop the references so engine/context device memory can be reclaimed.
        del self.engine
        del self.context
        del self.buffers
        del self.tensors

    def load(self, engine_path):
        """Deserialize the engine from the plan file at *engine_path*."""
        self.engine = engine_from_bytes(bytes_from_path(engine_path))

    def activate(self, reuse_device_memory=None):
        """Create an execution context, optionally sharing *reuse_device_memory*."""
        if reuse_device_memory:
            self.context = self.engine.create_execution_context_without_device_memory()  # type: ignore
            self.context.device_memory = reuse_device_memory
        else:
            self.context = self.engine.create_execution_context()  # type: ignore

    def allocate_buffers(self, shape_dict=None, device="cuda", batch_size=1):
        """Allocate a torch tensor on *device* for every engine I/O binding.

        Shapes come from *shape_dict* when provided; otherwise the engine's
        static shape is used with its leading dimension replaced by
        ``batch_size * 2`` (presumably for classifier-free guidance doubling
        — TODO confirm against the exporter).
        """
        for idx in range(self.engine.num_io_tensors):  # type: ignore
            tensor_name = self.engine.get_tensor_name(idx)  # type: ignore
            if shape_dict and tensor_name in shape_dict:
                shape = shape_dict[tensor_name]
            else:
                shape = self.engine.get_tensor_shape(tensor_name)  # type: ignore
                shape = (batch_size * 2,) + shape[1:]
            np_dtype = trt.nptype(self.engine.get_tensor_dtype(tensor_name))  # type: ignore
            if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:  # type: ignore
                self.context.set_input_shape(tensor_name, shape)  # type: ignore
            allocated = torch.empty(
                tuple(shape), dtype=numpy_to_torch_dtype_dict[np_dtype]
            ).to(device=device)
            self.tensors[tensor_name] = allocated

    def __call__(self, feed_dict, stream, use_cuda_graph=False):
        """Run one inference; returns the dict of (input and output) tensors."""
        # Stage the inputs into the pre-allocated device tensors.
        for key, src in feed_dict.items():
            self.tensors[key].copy_(src)

        # Bind every I/O tensor address for this execution.
        for key, tensor in self.tensors.items():
            self.context.set_tensor_address(key, tensor.data_ptr())  # type: ignore

        if use_cuda_graph:
            if self.cuda_graph_instance is not None:
                # Replay the previously captured graph.
                cuassert(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream))
                cuassert(cudart.cudaStreamSynchronize(stream))
            else:
                # Warm-up run first: capture must not include one-time
                # initialization work done on the first execution.
                ok = self.context.execute_async_v3(stream)  # type: ignore
                if not ok:
                    raise ValueError("ERROR: inference failed.")
                cuassert(
                    cudart.cudaStreamBeginCapture(
                        stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal
                    )
                )
                self.context.execute_async_v3(stream)  # type: ignore
                self.graph = cuassert(cudart.cudaStreamEndCapture(stream))
                self.cuda_graph_instance = cuassert(
                    cudart.cudaGraphInstantiate(self.graph, 0)
                )
        else:
            ok = self.context.execute_async_v3(stream)  # type: ignore
            if not ok:
                raise ValueError("ERROR: inference failed.")

        return self.tensors


def cuassert(cuda_ret):
    """Raise RuntimeError on a failing cudart call; return its payload, if any."""
    status = cuda_ret[0]
    if status != cudart.cudaError_t.cudaSuccess:
        raise RuntimeError(
            f"CUDA ERROR: {status}, error code reference: https://nvidia.github.io/cuda-python/module/cudart.html#cuda.cudart.cudaError_t"
        )
    if len(cuda_ret) > 1:
        return cuda_ret[1]
    return None
generator = Generator(torch.device("cuda")).manual_seed(6969)
prompt = "Make submissions great again"

# Cache-diffusion config: the wildcard filter skips the deepest down/up blocks,
# and cached activations are reused only on odd denoising steps from 12 onward.
SDXL_DEFAULT_CONFIG = [
    {
        "wildcard_or_filter_func": lambda name: (
            "down_blocks.2" not in name
            and "down_blocks.3" not in name
            and "up_blocks.2" not in name
        ),
        "select_cache_step_func": lambda step: step % 2 != 0 and step >= 12,
    }
]


def load_pipeline() -> StableDiffusionXLPipeline:
    """Build the SDXL pipeline, compile its TensorRT engines, enable caching,
    and run warm-up passes so the first real request is fast."""
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        use_safetensors=True,
        local_files_only=True,
    ).to("cuda")

    # Export to ONNX and compile TensorRT engines for the UNet blocks.
    compile(
        pipe,
        onnx_path=Path("./onnx"),
        engine_path=Path("./engine"),
        batch_size=1,
    )

    cachify.prepare(pipe, SDXL_DEFAULT_CONFIG)
    cachify.enable(pipe)
    pipe.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipe.scheduler.config))

    # Warm-up runs; the scheduler wrapper accumulates loss statistics here.
    # NOTE(review): the loop calls `pipe`, not `cached_pipe` — preserved as-is;
    # confirm whether cachify.infer patches the pipeline in place.
    with cachify.infer(pipe) as cached_pipe:
        for _ in range(4):
            pipe(prompt=prompt, num_inference_steps=20)
    pipe.scheduler.prepare_loss()
    cachify.disable(pipe)
    return pipe


def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    """Run one cached text-to-image generation for *request* and return the image."""
    if request.seed is None:
        rng = None
    else:
        rng = Generator(pipeline.device).manual_seed(request.seed)

    cachify.enable(pipeline)
    with cachify.infer(pipeline) as cached_pipe:
        result = cached_pipe(
            prompt=request.prompt,
            negative_prompt=request.negative_prompt,
            width=request.width,
            height=request.height,
            generator=rng,
            num_inference_steps=13,
        )
    return result.images[0]