diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000126.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000126.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..24f6fb67098f59e5bd07014a39c4f352ccc341bf
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000126.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a128800d060dab2f7eddf3170df976e8148ee89e573f3095e80177fdfca1ae3
+size 60591
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000262.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000262.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ab7cd1031142bb980f9ae0b089ff558b8e532c61
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000262.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26e111122ea3f5e7ab679e2d8dfbef03a5d305c3e90d15ac3ff846df47d40e4c
+size 12721
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000303.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000303.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..89f77c6882ef78d92f5a78b67507f207bd56b847
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000303.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c041fddbb142c848e5fa70e1fabb9275a6466ab7337cff7cb6edc8ddeb9418a1
+size 45199
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000532.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000532.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..02d65aee95da3c472373e60ed7c8666a771c90bc
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000532.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5233832e8dce9798d0398055aff9ec6585ce57fe7123f21acbf5ba86d16abab6
+size 52217
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000573.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000573.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..02fb73be94d8766c45e5e14f105c7eb682e3bd78
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000573.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f90e433dd13d7592e4eea99c3cb70330f45f94e441124af8cfbb15e55854fdb2
+size 39645
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000603.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000603.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..536c82ab6e544cca630c39acd47799dca7fc1c1c
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000603.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27f016e18e347bae4f9b09d5e39bbdc2b79de3a6c3dd7264ef99039ed49de38e
+size 17545
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000636.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000636.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..83a32e258188587c4e063590ed091eac8828b783
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000636.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:107ca290e22fe8fadaf6f94ce2f4ddc9f14afc5d3488b90935f0404e64d83a0e
+size 48529
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000880.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000880.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..449fa4821ab6696e0548967e4f817d3a0024f007
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00000880.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4decf45a853ba2b086961c50e26e4cccba24b63590e9f73c7ad82bbad6227548
+size 53672
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001177.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001177.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..11792f24dce9f1d60423f051da5c07971c008973
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001177.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28a3a177cba41ff38e96a9b65b6519b702c66ff253a296f454420941c579b592
+size 22728
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001200.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001200.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..124368c2d268dd223e6bd3475463b3e5446660c2
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001200.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d79ee51fe1bcf51481b92612486fe63023fc04ce12b3fa451ee957d353e9834
+size 74676
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001388.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001388.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..92ba9441e6b994c9ff7dfd5a7eb204e7eff2fe66
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001388.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4d4bb66c9217b51d483bd0bf59f46b33d8b780db903b974ef7671853c69a08c
+size 55087
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001412.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001412.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a9416c19b8ef8281f011498440d9b7c85a5577b5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001412.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2375f7baf1452f4ae5cbd66aa90c026d5790573f9a1f6c10901de3d38fd86294
+size 7727
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001498.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001498.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f1cf567700e281f4aa72763c5823ae6f061f97c7
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001498.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a197b46a1867a3bc572b4c3484aca93c1c46af80d28194afe7be9b14e25c68af
+size 14020
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001583.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001583.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e665d2cbedbce5d52213ce92553af42eb11326fc
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001583.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e74e175e13d79e30e8e7fe0cd35c3ce598cf6efb90fdff975672afc4a8725b3
+size 34441
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001766.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001766.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a28a28f6b4cbdc74a97f14055925670b90239097
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001766.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40c1319abf88c4d04d043b50c64aa4a32897d021c8a9a3fe2c93af398dbed44b
+size 16077
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001845.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001845.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a4630bac96613fa744fcca319e1ebbf8d7fc25e7
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001845.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97ec3f822c21a5375bb7b341a4c83c9344230999c7eb09161327d31baa7d84d0
+size 82709
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001851.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001851.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fe7b7f41da7bac39dddff80f6664e5ce59359cb5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001851.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0399b03a893a1dd3f1d02cb2460bcc692b4d53c2a127e900164ffcd20239c88a
+size 62206
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001947.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001947.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..20924bc607c433b3a8b3455d5e8540e432abc1b9
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001947.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a668541d2aad025a107c5a985db441edc3286544ca5e11b2f9e8793b9679245e
+size 52437
diff --git a/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001966.jpg b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001966.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6c7df9d6d7e72626c3d82c27f0ba384b391bbb83
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/ade20k_images/ADE_val_00001966.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eee98f4061ea88c92708846e941c8b159b8f046dc63a7a5e0723c8e7c9c248ee
+size 82994
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010005.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010005.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c92ceb8b38c07fa0139d5bf114274c2d7eb2dbb4
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010005.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b680c4c797acd6e889a6758b97cb6abbbe936a28070fd0af94740c6c737a0fc
+size 149427
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010008.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010008.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d69e1d950ec401cd3d8848c13fb5e913bf960093
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010008.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbf23cfd28cdeb077843b1b9cbf481e3967d309581959fb43d30ae79be4c1b71
+size 62247
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010012.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010012.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..21c7bfde5661946d0d22201f4bdfac3e251a9974
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010012.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5f0b492262abce8007df933d3302f5b7b264e09f7e0529321834deaa7798e4f
+size 82293
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010014.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010014.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6dbd1b32c02101f952c75e4c9ee9444b12a4a766
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010014.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f73ae589547a2ce2dac143f8914d1849a0a08fde9ef133c1caa636f68531c3a
+size 211886
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010015.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010015.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..735e4ae21089607845d6dc399412557d1ea940f0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010015.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7860d55831d494da4d6fb1112025bd28189acc5305cf86e5499304354c225d7c
+size 185243
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010023.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010023.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1c1a8fa8fbf8fc074e2ccf8de7a0c1654e607778
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010023.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbbac3af8a70b09b7ecf2c3917d583ba5a8bf45ef64c16f6de98780d4e4121a9
+size 120367
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010024.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010024.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2f75b37226e8b791df377462067d826f5a64ef28
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010024.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b02868847c1b4d9db6b81f94ed1ae99217d9504aeb2d89f4e4fc3bb35c3aba9
+size 225722
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010037.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010037.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..dee551abd0b63b45b8d88dca2e24dcd0025f493d
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010037.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72e6a6f3b7088b2f89c695c0c7028a951e4209667b5ec7e058ca01700e083e19
+size 117151
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010039.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010039.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a21451721817c2a9172e18666697e1bd75390f3b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010039.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4f3fdd9928ecfab0507d581cfc38f70f4e1d94435c3922be2d657184ff07898
+size 181612
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010040.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010040.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..00be91ea82fe45c8ef20a03a44f2546ffb133cc5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010040.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:217e1b4ffdd10bda1135caafea5e33ab4c010419d5dc314f99ecce40df3de710
+size 203649
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010041.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010041.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..347dc5b1f70901ee57936c625e7332b7c5f1e211
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010041.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:142b5760af30c396c36238216c0eea2d3149076e3906f293a537ced9da9d90b4
+size 153054
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010046.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010046.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f672b2acf038da3bf95add59139bb0388c9ec1bd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010046.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7bf50fc5156943752f027dc1b23c5f28b608de6ffe3a51c8d5cd5f5416b20e0
+size 179720
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010056.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010056.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3223a79523d1e28af0e8d936bc9c4321e215c18c
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010056.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74d48467e4cf7d45805085231ca69d128eaa777dfe258814c3e38f2173dcad1c
+size 248664
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010058.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010058.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d6eeb6704c6bdb26374848a026a7441af0732420
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010058.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96bca117aa1027b6ee4cf2482df22c19d8f61999556079d1bdf4c47a3258b742
+size 182710
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010069.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010069.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6d75d70af3ad0ec1f5ad92820699e9b9d8ee406e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010069.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02c888021c40411ffadf748968c83bf0a41226d0caecad1572d2057cfb304338
+size 239108
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010073.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010073.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e32737a6526116b84f8dbcc02a9adba5858fc95d
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010073.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab711171bf64c58771d7ad64dabc27e41981f0eafb3bb6adaf2a36fd1262b500
+size 269411
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010082.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010082.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..260395db62e5eeb5a5f24014a0d40a51ab6a99dd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010082.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac59845af8badbaa5906686ffd1899a61bf1dd0f94b29cde7cf28ea2dc1a77c5
+size 168228
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010083.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010083.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..02edc8253d8be6d20fcc22d0fe0d7eeed9e5402a
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010083.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a92a41793b78b3e2860ab078e64e38e9185946130b8122220a7e925364aef03
+size 114547
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010084.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010084.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5e781ef2b4cd0be86919cfd3c207ca090dbb60a0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010084.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6feff2684d0513e27179644edbc1543a3eca202b82e12ec8cda4d04eabb7a293
+size 261011
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010094.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010094.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..13067771253facfe70f18fb9b5d0691ded12c6f5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010094.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e55140f78f2758edd47e1efb5c00d1145cbcc99075fdc20d69d7e5ccebf27bc4
+size 159568
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010097.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010097.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..89dd95c73a75c4c82f16419902405819baed5b04
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010097.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b2c8e4dc9c084f5999d07df456404b15adcce65e892080b4f1e18da6507504d
+size 113045
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010104.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010104.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..32bdff54a5e0eeb62bb96a938a26b135ef047292
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010104.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edf7829ec93f99c602c70fea9b594edbfa6e015377995aa108821c828bbbaf0f
+size 187253
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010107.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010107.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e65daf878b75fb1a08a24f82dadca034241caa38
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010107.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77b4b8d390ead7e4b1691bf785ba84229b8cf7c2a73c5e4d024a138329eb0a5e
+size 87673
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010108.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010108.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..28a7d8cb91e54feed7d8bd78b0d240938a1dcb5c
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010108.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b96990e47f86b9d6f6ae88be308fb96e17972f59e232ab15a5503c715c9a3140
+size 76964
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010115.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010115.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..96d2f93c0b7453420de41bf54879219377ec8d0f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010115.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71afede74627d1478090fb5a092ce45584b866a0ee5bb0514f1b7bb853e7c4d1
+size 122470
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010123.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010123.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3f203d787ba24cf3a451a73f7f731274ef0900d5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010123.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292514865ebdd1a24d97dcb174b35de24e94e3a615e2e30ba370fdad945e308c
+size 142945
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010125.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010125.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d84225b295c123dff21ce3faaf724554f6c64546
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010125.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2250e222d02425821e9d41a809ac85167f32832c297dbcadaad08e2ac16d1a15
+size 129044
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010130.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010130.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ec12f99fe8c26eb52ae7ce94dc9bcddc4aae8e57
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010130.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cc5d910519428e15604845df0db5b7b065f64aa3859edc60b0e2a27e18446f9
+size 186066
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010136.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010136.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4228f429be8c0f7f2c2153963ba7087912f1a605
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010136.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5facda88af5ef308c39f802be2dcf4287141e290c17a0118927ea8d9358f3278
+size 114426
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010138.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010138.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..23a27b2170514459417aa50cf0f8315c61ec5baf
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010138.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de865325ee014719b7f07aaf7ed7de03abd44d4a81603c7759119dee4d79b531
+size 161164
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010142.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010142.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6b14c9cdd73fe2e0e758bd078eeb739bd9a7e49e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010142.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c562deb012354428349a44d12ed25e99ce5111bd0ab630d0572a15586ce3b38
+size 270353
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010145.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010145.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f2b1084e979ebbef81ba64e719d83a9490ac97e3
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010145.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fee8892ce1ad5d9ec7b50f9c9ced6b79d53df9236e5e8cd3b233b3786c4eaf13
+size 162637
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010149.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010149.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..dc78a3cde6415750bc213e2b9244d680a26282d3
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010149.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5e33a5c31c213886d974349e557b1047963d713a042e6668101562d573b3cdc
+size 203459
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010161.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010161.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6f35924420cb9f3da20e8867d109342864f68f05
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010161.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7e7eb212cad6ee8a0bfadb6e9685dcf5e194720d5894ef4a0365c00dbdd2802
+size 165598
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010175.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010175.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c67f44addf8907fd8aa5464742a9d69058533cb3
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010175.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52799e6ab5e1e0986031ab2a197a9812bc302655183021d83b4f2d8c1ce06cf0
+size 190046
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010176.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010176.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b67b5db35a043d73da7a8bb54bdc1e8e0e9a5aed
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010176.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06693452d1dd9377c20665202014129596703e0dc982205d7452c229d6bfa73d
+size 101856
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010179.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010179.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0158964dbf758ef3e63fcf6e341464fb1f02addd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010179.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cad457c2af724b6b18e6f982ed082a8f662ccf197b50615802f04a3d57f0f75
+size 187581
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010192.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010192.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..44bab09ca7d4d4e3e3bf22510834ae0c12a39386
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010192.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed43a05c7c7cdaa8443e13fa244a6a934bd528ac9e7f0c170c39f26308cc38a5
+size 136555
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010196.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010196.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1279bdba36f19441a998b064d9a90cdab73f608e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010196.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f31d8008b2c6ee7eebe33e428ef1dcfda5950cd39ba1998c8571607052a56361
+size 212632
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010205.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010205.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a99eeace8aa48c39671f667730a3c883dab1e74b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010205.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc89900b0a93ffe36298c52eb6fdf5bb557936de83abc36674efad003eae0646
+size 88018
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010211.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010211.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6a2027d91cd97a68924521ae76f0ababadf98443
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010211.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7075959355e744741847f16aff5ef3e2bcbe0b4c4640fc1900399b8c8ede423
+size 167352
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010216.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010216.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..af4e9eb000113f078f15a8a7e7e38527c6cf5c3d
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010216.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4214b024ce8da67a230bc95968bb129620d916ca27e8ec26e096986fdc6c0883
+size 153686
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010217.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010217.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8e2fbf2f8984c29b9a07b77f794b7b0491b6a792
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010217.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d804acdd3cfdf95589a6b6c1acc00fcf5b28c46a9ad79ab8ee90ae2cc0ddc33b
+size 167361
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010219.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010219.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7af3b6a4cd2783a7a02538831f8a52f4f7631f38
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010219.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cadf8bd93a266322d7ca0faeeb001c783c9a513b0c447c14a0d75fceaf039e61
+size 166468
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010222.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010222.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c4b099705d7db887048cb95201296f5bbcb9ebcd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010222.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29a9cc2d98054bccbff9b3088ee9cd57d918a5f001b6a3c3cdd425ca50b5ea6a
+size 79410
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010229.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010229.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..87c7a58202512dd67f8f513d0d623209b5f9cc6e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010229.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98a3f370d6d7f54dd954c3d836a88b4bf211a28446d9a3ae61c7a4ab3b8ef4b6
+size 141942
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010230.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010230.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a8eb52eb69e8f324a308e34a54a6f4c39dba7834
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010230.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba25d4493c441390ad9bd190420e84eaad618cbfe76c78d69ef2bba9dae037a2
+size 230438
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010232.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010232.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..24d126770a272a9291345ce77f013c8c5a68dd8d
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010232.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2739fe43633599276b8d81bb6ef9cabcf8307e7ae494102d6907e6022b23c231
+size 110052
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010239.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010239.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a771f295c1d8d9de918c2cc8a13e28e5c84a8226
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010239.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:834e27ac610e2f1d29434db6624c4c36224dc3a7ff37ecea599cc6ae743a1f7e
+size 113696
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010241.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010241.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..edc344bb2543e845aa5f4c0910a8e985a1f06b12
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010241.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d0a4106b4525e6b0f61dd06150acbdfc3d42d8475522b959d031a75cef73ddd
+size 130598
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010243.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010243.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5178442707163faabb67a9691274b643766cbdb5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010243.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b982f73867655e4ae309acab28b9359ef76113a3b641d3e824842d998940e68e
+size 97021
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010244.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010244.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4fa21f451c3ccebffc78ecc06373d766f46b76ef
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010244.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fee3d045e393beb004766ed727148693bd7798dedc9cbd61c7f9d66e74835afb
+size 234619
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010245.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010245.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d84c527f976f34588cd70076dd2b0880beb38474
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010245.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b7794a5ec66f21c09cf68a844532b9b29e3e3dcc2985473bf93e9014c0d1857
+size 115424
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010248.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010248.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..defd8bbd8ed9c6ac79f8aa271e08449b94378785
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010248.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adda9dc8eba46853a050241ca9efefd0d55c08e85dc64a20bd0c2c00ce1b387f
+size 206687
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010249.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010249.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0213df2220ba1b612926512f5d69d8b21ad7607f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010249.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71d14a5222bb27d8eba2ed9a4625f8e65e35032c5b5b902d79eb335594c17ec
+size 199377
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010256.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010256.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..959a9eb415a2306f4765202782baf9b28016491f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010256.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99a220f784e6aa83c979d9b086c5f5ae4ebb34c1676c7ce92498075f55d444ac
+size 317673
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010263.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010263.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..90bc4dceb5688e68b795b50a39776068530d0507
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010263.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b2de0663c23c57867d402cacd007275ce55da85c03f287ac919316e6b72b1d2
+size 150865
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010265.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010265.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..33d2e43211e5c0be4c9e3a539bf836adaa426817
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010265.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca2ab698d0f0a6981371ae67cc43b39eb1b552e0c246978b7abf8ce5f77404f3
+size 63484
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010275.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010275.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cb664c94ccac778b6d7fe708911ab703fb1ae52b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010275.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:174fd56e2547a7d1c449229ba48a0a19a68208adabc335a8cf6c83612fd3346f
+size 167035
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010276.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010276.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5b6d0ec8ff0102fa8743f5ee7243a61e2de18ca5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010276.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:665f0dc6d16aa00264a34be6e2904756ecf51ee6ceead909ab951ef905ce5594
+size 294584
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010281.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010281.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0c3320c735abacf8e49aeb8fa3d7b1211530e1d1
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010281.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c48f166c97e0ec1d7b52bcd2e293956831866d26aba73cae739d18a5bf4004ab
+size 169852
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010290.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010290.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ec3cebccea7db552aee408bdd442fe4e0fcb934b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010290.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ed6816a6df45e78a2d013a0861716839ca21d97a2e293b1f0b300c62e1344d
+size 171364
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010303.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010303.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e74c74ff0f9df06a1127debc0f3a274f57bee0bb
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010303.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41d716df8e39fc7f094523272595b0819dce8e9a21889d514fc06607e9c77272
+size 142387
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010309.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010309.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a32527214441b33441ff7b7adcd7097463d6cdc6
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010309.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bac9886b70cd6e9a49581ab6593589cd7fd85bf218deace21e61eab6c734540c
+size 87188
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010313.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010313.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..67d3b858b82de8c3fefd0266198b3e729a40e2f0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010313.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3627f4ce76d7ae3376d150219bac730182538b6202c4ffbc3e39a477404666aa
+size 78145
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010318.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010318.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6a722daf240f94b5eeba5957ae3fae66e2a457f5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010318.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23fded25bd4e73a3c0f217074cb20cd2d761ea09ae70d3f99f7769a4dcd69fac
+size 235652
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010319.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010319.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8ad37714da77ea1cd1b610edcb898e1455016db2
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010319.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94a2520e23e60c57dd490b8ecc23a48f5e5c754ce7c88ff1610866ae0fdda89d
+size 134069
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010321.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010321.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..70ab1a0aab4f93428f07df805e23d8eee4926dcd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010321.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5242a6ad2f8bfacaf15d8a285d22b2f02ad7715ff80a5d4c370f63eef9946ef
+size 250231
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010324.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010324.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9a7fd2b5055459247a9ec4dc6a8a06f8f756b631
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010324.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8f96f197e805f2f3bfc0c081c9d0d7fd374e9318057a55ebf7078df34cd0862
+size 158132
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010327.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010327.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a2fa85f52ae89a207d10cd749091623fc27c3de3
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010327.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04d9b748e35f577041dbba4098051cdc8183aa402837d376661e1be160aabf56
+size 203045
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010337.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010337.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d58603ac2f3eb5783cfbb931d38c00f5b86c3a83
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010337.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f15f4d4d38611dd8efeb9a6cecdfb88e53fbb593a47ef6f9241434e6650d7a8
+size 183678
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010342.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010342.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..18abf045a15a206e00d77c1dad921423e36ab427
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010342.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:081c272b0d62ca96a6365a8854cb6520911a7e6dd15384bb1f2d9ce44c22810b
+size 235553
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010343.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010343.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c17d4085e21b3deca1ac087b71c6e9cab4ddd95f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010343.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5713aeeeeffa86886c607e3f0374d2b91ca0b6c7db6decf4c54040569373ec65
+size 237486
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010346.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010346.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0736213d412ffc23f2390b660a8b82dc01b4a25b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010346.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67789c661e3ad0917ab1fda8aff80efe30a15860cdba5b89289d364f0e4adbc3
+size 150445
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010358.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010358.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5c771f88d04b833441c90bcedb771d89b38b3326
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010358.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6bda8fedd3c0714df052e0b8539e99f5a22cc2b5e79d8c637bd195f8c7d8671
+size 317607
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010369.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010369.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8dfe477316d8220b74929679aa7f562d27a0a470
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010369.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07bc372e721e147638c0076bdc80b00114f0c4d7d8ccd645ebaa2fefa8878bff
+size 204564
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010386.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010386.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a07608c773af4df778cd873e1c0a3aa869549ff7
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010386.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1194b28b5f87f728eb828463c10c8ec8d9acaef4f63953d02611eb8af5cd66e7
+size 135915
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010388.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010388.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a6c32b75177202917d25c96f2d9f4ab728b8a084
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010388.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7861b90389a606cfe81f3db451ee76392146098f69508e79e0f8d6154415a9df
+size 83010
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010393.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010393.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8fec9d5a181b1b01080a2f219c7f11f2c54a9cfd
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010393.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:190828c137dbe1ac7461fef3aa09983fbad048a4b92e98f5a4576b0d68b783c5
+size 121960
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010395.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010395.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..959eb9cf8ebcb6f3982ecb61ea8be73406b23bb1
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010395.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59b2dc98d573047568456dc1ee44a6effd48128425d15a9e1cb2f1bfe5527fc1
+size 150629
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010400.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010400.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b6dce4e91ca36048c974ec50259d656b4ec81a84
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010400.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21df5454fbab171879e4a7974ec147df8515742c5b994d55ec37f8ef86058e82
+size 199939
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010403.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010403.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e36da07febc04d27741e3c93b57506f19d53c6b5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010403.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d51b062b1edf6b19cc0f6fb84ddf1119edda24099dc83d6b36eee47f61975d77
+size 173532
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010405.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010405.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7bd6655b755891c552e4063f8ee7f10a81b45fbe
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010405.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc17c7fe4438c622314a16bbbd4224ddc283f1ca82cac6db3d719bb68afe18cd
+size 315784
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010407.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010407.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d0f8467d79a7596f9c84caa1c6669cb70ecc35a
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010407.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16eea1f5879b97f1e09751a317397d91b5760121962815d114dce4bd42f4d5f1
+size 109394
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010414.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010414.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3061076c830afc5871a67321fdc14f394b00e6b8
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010414.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8626e500aa6b629b678a9b5c2d9b77c2e70e975818d228cfca35c7db95568db
+size 172848
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010420.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010420.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7395accbc5d89e366dbd04b493f5a623763f7e96
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010420.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55a0fbfa13aea527efc49d6682b46b46d415b4e6698b5b33395f9215db339c63
+size 184080
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010421.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010421.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..02782aebaaf35fedc4e71796d9a79a82b9f499b0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010421.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34b90f97f02e82c639dd4be1f9d49c6ede82c396fd2ba6544721eca9e30d2f08
+size 170870
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010428.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010428.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..db6684f86a1d014ee55a4b13917640cc9f68a006
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010428.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6bf8d08e95f5168c8f64f1622ce5a9cfca417aa248dcf964ec3d147d3cf9fce
+size 157269
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010432.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010432.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3983a7bc8605b4c8f496edd4bffd7482dab69a85
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010432.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd34a6a9f6cf486df534baf42f00955ea011692b7a4521b169ef9a2c40ca5290
+size 298230
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010440.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010440.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0ffc9d581374703bc9d3fd832604c88b7236143e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010440.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0e31420a81d568dd7dc0639d4c1c321193e00a92bafbc04b43bebfea3612184
+size 70943
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010442.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010442.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8cecf7f9f9b822c262d22d1142cea30ce0606022
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010442.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82633602f6c8e4a11689fb79dadc3a42eca8f3882e70ab319ea356038d749d74
+size 112185
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010444.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010444.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..08ae10b677e94ba727e8a2c8925348f04bb7a528
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010444.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2488df991059df64d46b4f264b6a903313605c929c6fe2d66d80ad83a11d237
+size 92362
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010445.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010445.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2208b207c45b44f86e2eaebdd494ad65d3e570c5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010445.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:685c6a86f1551d327aa411566e0b159f1c1f67b022d9dc8b308b94372459e646
+size 144790
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010449.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010449.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..23fe94b12fe6c18b5b51fd788ed1b05eda6f4347
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010449.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a782f2e081dc584e9188d5223c7957da0d78c9492ff1e8a53bd28ca6b632d031
+size 255295
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010463.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010463.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..abd3f8b6ab085d3414f89387c3d3ee782aa9747f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/train2017/000000010463.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54ca740f3a9bf15143bca238b388f3634e8d2d7a77de0ddd9bf22a3b10e21851
+size 177764
diff --git a/CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013348.jpg b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013348.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..21edb31a6f41e325746012c20b887d9ddd2a51a1
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013348.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8371f759628048bd294db70d9fbacb6d4f2558a12c25337fa04fea6418c9922a
+size 164490
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000018380.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000018380.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..afe85797a39027bffda1471cc5615e180a127029
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000018380.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f019938d891305fab6ae0d0e9edc79e0d74407cdbd0ecce324aada0ab93a86e
+size 279661
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000052507.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000052507.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b1ffd39e2ce9bdb5a751094cfb581d176411578
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000052507.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e6a1ae4cf32d80d068033a9c3ceeec13fb835e90460687bc4cd458fe9abdb02
+size 230322
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000057672.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000057672.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d5a7cc38c13a49db5dcecffd0ea56e3b7d53f36b
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000057672.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f497377f9ad39d7b90752d4faf5cd7605329652d511f5009430d39231512334
+size 147264
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000064898.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000064898.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f891165589d71a2d5f0065328a4b2376e5482b34
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000064898.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81af7e0eb641717eb10d0517c6a39617a5f4f1c9d5a6cad47e300f7f96a4f68e
+size 175262
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000098392.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000098392.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b8c489714db4b675f3ccab0cdf891087359e41f0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000098392.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c976adf7599725c1bab7fbae692611501e7a33fb5e182c3e301f985205340ecf
+size 92708
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000110638.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000110638.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5019817e6e0445f08e192357bb27fb1e16450358
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000110638.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e002401e4038fe47dbd68ccad90c1b5ef0365527d12fae72b4fbd3cfd7473dc9
+size 228586
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000119445.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000119445.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3f84feff8e16b425310cef7ec561416866cb9e10
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000119445.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef9c63a81758fdcbc4351eac736014c63e6677387051351771ed7183b55daa5d
+size 149162
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000128658.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000128658.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e5eefc7e8a8ccd92d07edc2ee3da858ddb192496
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000128658.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfe0781e6f56fbb4937f7b4c351cd17b554df02c01f006b7c1a02a47d792abef
+size 105531
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000154358.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000154358.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c6ac6702d3bb97a16e2ad16257d773508f7a4f8d
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000154358.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3b9728c30e1ca572b359f75c0937f05682e4d4bed9d69bd93ccfbac014493d3
+size 173359
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000166259.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000166259.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..207d14e2dd93baa4532223d8f67559a20ef735c5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000166259.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae0cf5374adfa2c78f50e5fc58b51a18e8db2285f00912c2eca6a2af204857d1
+size 164881
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000166563.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000166563.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d8b6390a5bf4a097f5c30626bb048eb226a887f0
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000166563.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8478b6b153105a252bd65c4c415da6c1f37f249e304cc6232d9b66c501e6d49
+size 101772
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000175387.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000175387.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b6c6f81a1e21edff00c265e56413c59169243f49
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000175387.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86133820deb61a320d8ed39e4122b109c5d9c4fbf2398c86b4ebcc27dcb5110f
+size 63517
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000185599.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000185599.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f43c26e425572eec0ca78b0444129254b5e25d34
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000185599.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ec4dcc5131d7aba9d71b0e8cd05e278d142c522e1077f5cbc4627da246979f6
+size 119803
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000205834.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000205834.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..539be5ce4a196d44b42fec83b60526c72e5b9b48
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000205834.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86bc225035731803d15bd65aaabc7d29e8af8b9c6ccb93a8d4ee11d337410c66
+size 118903
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000231169.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000231169.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..51629fcbca481a45b7a4fa8b4beb226158b50fe9
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000231169.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81c074b0a5f5966302986ecc7e2bea71dec9b564a6e5a0e99712dd82b1a314a4
+size 193183
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000237928.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000237928.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..38dd5b930f0d1c84fce1f1c1c308a701ceb65542
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000237928.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0c71f74a1830e0065931d5f26fd62a42853c5a67160a0e6ea2842812cb3d21e
+size 108718
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000255824.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000255824.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d78e78ff55ec24d20000d531f6b6ff78360e30e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000255824.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82d1faee4f014f5a974b9c99a2b1e123049972fe4cb8c0d72323b4727fa5a9ed
+size 172916
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000256775.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000256775.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b3ae813432a72a47180354592c16168403e92275
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000256775.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa1e43e6e684a31f44044e049f966f9e3829480eb65ced9aab6545491f85f2aa
+size 143596
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000299355.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000299355.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..628b8db18acb893bdcfc752d7a0c517543b8c034
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000299355.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14c7a30312f6a1df491f475f58f137b29a4fcb942e3d978a707a6c05c05c2b57
+size 82256
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000299720.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000299720.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..083da4cef392329088003cca29efb5a82a979b15
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000299720.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bb0c2013ea6742625f0e013115b1ca29740c7de210de6be7ae250618d0f6941
+size 78799
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000303653.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000303653.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..40b44478327f668e603bfdb8d45710d41e6f7e4e
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000303653.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:677a7f0ad1d0af9492a3f7c73cbc166e80969f4c38c5f80d3709a5b55a5eb2c5
+size 212824
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000323895.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000323895.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8abe63a75a2a2d00eeef9de94400a75924c5cfb5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000323895.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cef175a9dedfc31ad0e53519cfe601d1413658ecf738e5a31e0c43ab3ee1095c
+size 104163
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000335529.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000335529.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5ab72a59a8abc6d2ca1bb57783af782894fa1d04
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000335529.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5a6a8051941611cb4a05f5917e2c792b1b3f49ff63b35b5324a7e4a82fdf034
+size 148244
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000348045.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000348045.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ac1e71a870153ac70068ffaef73dce805e83375f
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000348045.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bf52dc1bc3ddeaaf84b2f926bcae30d72569afee82ea5e88d536321d66e9428
+size 166603
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000348481.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000348481.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2e59c00152789d588116a095ac6b1677dc97ebde
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000348481.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b4434ecb88a7379cd6d8327692ce2a1255100ef66bebd602b680e00b5a4206a
+size 115455
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000350405.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000350405.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b427251069efb6b51ce6452dcc17f65eb0249a4
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000350405.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea113f2a601d32d5032df428be396854d97bc3f0b35520280ba860a9d29d9c05
+size 69299
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000356347.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000356347.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eaa1bf3c2baa54939fb1e429ba2763e4349c19e6
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000356347.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:866001cc56b4cd99081f4a86677db1c83fbb412a89898df463698e97db9f20e6
+size 174921
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000361180.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000361180.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..64fe766e51cc08082a7015352405013908d31e04
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000361180.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b662189e479b086af62762a0561f3b65159aa691c7b1e57266346f5536b8ce3
+size 220029
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000403385.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000403385.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5df7db206d3f60fe7bf9ac13fd7cd599729c91aa
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000403385.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11632ed3fb470d62f7fe5f0445c4f10ec91225c4a820c95d1c3946af9426d4c7
+size 81462
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000406997.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000406997.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c41ac9c6bc59a619d16b55cbd9924b8c39e0a711
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000406997.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73d1d5fff4154d5d221f8a7826d64a93c13556068d5826047749e7b1179d6678
+size 237838
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000452122.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000452122.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..688c1add65ec14225dff8639af22f948ddf790d5
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000452122.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28641b8180746005ada298f382067afdc96e88ee7ed83353b1ccdedd24bf1b03
+size 62118
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000491464.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000491464.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fee07ec33fe1d10dc4c4bae2b18452c25b8026a4
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000491464.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d3cf9e6cb1a68aa3923c03deb225629bcff3b0c326ae11b7be601eed7e31af1
+size 159468
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000517069.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000517069.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..df66fbd84fd5d88655d039b90865afcc1fea95cf
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000517069.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e571a8640aa7d8da1cbd762321cc51def8fde501bbe9de911fc1f4f87e04bbc
+size 148150
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000522393.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000522393.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b9e096b92ec81edcbb31f92927d0bef60ec394c7
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000522393.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2517edbb93dfb8671bd14bef8485e42f5c5e2ece488e08bcc2fe651ec708a918
+size 227723
diff --git a/CCEdit-main/src/taming-transformers/data/coco_images/000000569273.jpg b/CCEdit-main/src/taming-transformers/data/coco_images/000000569273.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..970c96a706ba9c6a2d3bb1cd3c9c38edc93863a9
--- /dev/null
+++ b/CCEdit-main/src/taming-transformers/data/coco_images/000000569273.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:162627ee7b73fc1c7c224c4aabb26e8dc3e7b51b44b06cd86b0f182fc246aeec
+size 198160
diff --git a/ControlVideo-master/README.md b/ControlVideo-master/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c366c9cf01722123f18e6c39674472438838f41
--- /dev/null
+++ b/ControlVideo-master/README.md
@@ -0,0 +1,167 @@
+# ControlVideo
+
+Official pytorch implementation of "ControlVideo: Training-free Controllable Text-to-Video Generation"
+
+[Paper](https://arxiv.org/abs/2305.13077)
+[Project Page](https://controlvideov1.github.io/)
+[HuggingFace Demo](https://huggingface.co/spaces/Yabo/ControlVideo)
+[Replicate Demo](https://replicate.com/cjwbw/controlvideo)
+
+ControlVideo adapts ControlNet to the video domain without any finetuning, aiming to directly inherit its high-quality and consistent generation.
+
+
+## News
+* [07/16/2023] Add [HuggingFace demo](https://huggingface.co/spaces/Yabo/ControlVideo)!
+* [07/11/2023] Support [ControlNet 1.1](https://github.com/lllyasviel/ControlNet-v1-1-nightly) based version!
+* [05/28/2023] Thanks to [chenxwh](https://github.com/chenxwh) for adding a [Replicate demo](https://replicate.com/cjwbw/controlvideo)!
+* [05/25/2023] Code [ControlVideo](https://github.com/YBYBZhang/ControlVideo/) released!
+* [05/23/2023] Paper [ControlVideo](https://arxiv.org/abs/2305.13077) released!
+
+## Setup
+
+### 1. Download Weights
+All pre-trained weights should be downloaded to the `checkpoints/` directory, including the weights of [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), ControlNet 1.0 conditioned on [canny edges](https://huggingface.co/lllyasviel/sd-controlnet-canny), [depth maps](https://huggingface.co/lllyasviel/sd-controlnet-depth), and [human poses](https://huggingface.co/lllyasviel/sd-controlnet-openpose), as well as the ControlNet 1.1 models from [lllyasviel](https://huggingface.co/lllyasviel).
+`flownet.pkl` holds the weights of [RIFE](https://github.com/megvii-research/ECCV2022-RIFE).
+The final file tree should look like this:
+
+```none
+checkpoints
+├── stable-diffusion-v1-5
+├── sd-controlnet-canny
+├── sd-controlnet-depth
+├── sd-controlnet-openpose
+├── ...
+└── flownet.pkl
+```
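+
+For example, the Stable Diffusion and ControlNet repositories can be fetched from Hugging Face with `git lfs` (one possible way to do it; `flownet.pkl` still needs to be downloaded from the [RIFE](https://github.com/megvii-research/ECCV2022-RIFE) project):
+```shell
+git lfs install
+git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 checkpoints/stable-diffusion-v1-5
+git clone https://huggingface.co/lllyasviel/sd-controlnet-depth checkpoints/sd-controlnet-depth
+```
+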
+### 2. Requirements
+
+```shell
+conda create -n controlvideo python=3.10
+conda activate controlvideo
+pip install -r requirements.txt
+```
+Note: `xformers` is recommended to reduce memory usage and running time, and `controlnet-aux` should be updated to version 0.0.6.
+
+## Inference
+
+To perform text-to-video generation, run the following command (also provided in `inference.sh`):
+```bash
+python inference.py \
+ --prompt "A striking mallard floats effortlessly on the sparkling pond." \
+ --condition "depth" \
+ --video_path "data/mallard-water.mp4" \
+ --output_path "outputs/" \
+ --video_length 15 \
+ --smoother_steps 19 20 \
+ --width 512 \
+ --height 512 \
+ --frame_rate 2 \
+ --version v10 \
+ # --is_long_video
+```
+where `--video_length` is the number of frames in the synthesized video, `--condition` specifies the type of structure sequence,
+`--smoother_steps` sets the timesteps at which the interleaved-frame smoother is applied, `--version` selects the ControlNet version (`v10` or `v11`), and `--is_long_video` enables the hierarchical sampler for efficient long-video synthesis.
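+
+The script also supports batch processing via `--jsonl_path`, where each line of the JSONL file provides a `video` filename and an `edit_prompt` (a sketch; the file name below is a placeholder, and the source-video root directory is currently hardcoded in `inference.py`):
+```bash
+# edits.jsonl, one JSON object per line:
+#   {"video": "example.mp4", "edit_prompt": "A watercolor duck floats on the pond."}
+python inference.py \
+    --jsonl_path edits.jsonl \
+    --condition "depth_midas" \
+    --output_path "outputs/" \
+    --video_length 15 \
+    --smoother_steps 19 20
+```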
+
+## Visualizations
+
+### ControlVideo on depth maps
+
+Videos generated from the following prompts:
+
+* "A charming flamingo gracefully wanders in the calm and serene water, its delicate neck curving into an elegant shape."
+* "A striking mallard floats effortlessly on the sparkling pond."
+* "A gigantic yellow jeep slowly turns on a wide, smooth road in the city."
+* "A sleek boat glides effortlessly through the shimmering river, van gogh style."
+* "A majestic sailing boat cruises along the vast, azure sea."
+* "A contented cow ambles across the dewy, verdant pasture."
+
+### ControlVideo on canny edges
+
+Videos generated from the following prompts:
+
+* "A young man riding a sleek, black motorbike through the winding mountain roads."
+* "A white swan moving on the lake, cartoon style."
+* "A dusty old jeep was making its way down the winding forest road, creaking and groaning with each bump and turn."
+* "A shiny red jeep smoothly turns on a narrow, winding road in the mountains."
+* "A majestic camel gracefully strides across the scorching desert sands."
+* "A fit man is leisurely hiking through a lush and verdant forest."
+
+### ControlVideo on human poses
+
+Videos generated from the following prompts:
+
+* "James bond moonwalk on the beach, animation style."
+* "Goku in a mountain range, surreal style."
+* "Hulk is jumping on the street, cartoon style."
+* "A robot dances on a road, animation style."
+
+### Long video generation
+
+Videos generated from the following prompts:
+
+* "A steamship on the ocean, at sunset, sketch style."
+* "Hulk is dancing on the beach, cartoon style."
+
+## Citation
+If you make use of our work, please cite our paper.
+```bibtex
+@article{zhang2023controlvideo,
+ title={ControlVideo: Training-free Controllable Text-to-Video Generation},
+ author={Zhang, Yabo and Wei, Yuxiang and Jiang, Dongsheng and Zhang, Xiaopeng and Zuo, Wangmeng and Tian, Qi},
+ journal={arXiv preprint arXiv:2305.13077},
+ year={2023}
+}
+```
+
+## Acknowledgement
+This repository borrows heavily from [Diffusers](https://github.com/huggingface/diffusers), [ControlNet](https://github.com/lllyasviel/ControlNet), [Tune-A-Video](https://github.com/showlab/Tune-A-Video), and [RIFE](https://github.com/megvii-research/ECCV2022-RIFE).
+The HuggingFace demo code borrows from [fffiloni/ControlVideo](https://huggingface.co/spaces/fffiloni/ControlVideo).
+Thanks for their contributions!
+
+There are also many other interesting works on video generation: [Tune-A-Video](https://github.com/showlab/Tune-A-Video), [Text2Video-Zero](https://github.com/Picsart-AI-Research/Text2Video-Zero), [Follow-Your-Pose](https://github.com/mayuelala/FollowYourPose), and [Control-A-Video](https://github.com/Weifeng-Chen/control-a-video), among others.
diff --git a/ControlVideo-master/inference.py b/ControlVideo-master/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..06f012930a704be0d1bb6175c5c3641c70e0b65d
--- /dev/null
+++ b/ControlVideo-master/inference.py
@@ -0,0 +1,204 @@
+import os
+import numpy as np
+import argparse
+import imageio
+import torch
+import json
+from einops import rearrange
+from diffusers import DDIMScheduler, AutoencoderKL
+from transformers import CLIPTextModel, CLIPTokenizer
+import torchvision
+from controlnet_aux.processor import Processor
+from models.pipeline_controlvideo import ControlVideoPipeline
+from models.util import save_videos_grid, read_video
+from models.unet import UNet3DConditionModel
+from models.controlnet import ControlNetModel3D
+from models.RIFE.IFNet_HDv3 import IFNet
+
+# Device and model checkpoint paths
+device = "cuda"
+sd_path = "checkpoints/stable-diffusion-v1-5"
+inter_path = "checkpoints/flownet.pkl"
+controlnet_dict_version = {
+ "v10": {
+ "openpose": "checkpoints/sd-controlnet-openpose",
+ "depth_midas": "checkpoints/sd-controlnet-depth",
+ "canny": "checkpoints/sd-controlnet-canny",
+ },
+ "v11": {
+ "softedge_pidinet": "checkpoints/control_v11p_sd15_softedge",
+ "softedge_pidsafe": "checkpoints/control_v11p_sd15_softedge",
+ "softedge_hed": "checkpoints/control_v11p_sd15_softedge",
+ "softedge_hedsafe": "checkpoints/control_v11p_sd15_softedge",
+ "scribble_hed": "checkpoints/control_v11p_sd15_scribble",
+ "scribble_pidinet": "checkpoints/control_v11p_sd15_scribble",
+ "lineart_anime": "checkpoints/control_v11p_sd15_lineart_anime",
+ "lineart_coarse": "checkpoints/control_v11p_sd15_lineart",
+ "lineart_realistic": "checkpoints/control_v11p_sd15_lineart",
+ "depth_midas": "checkpoints/control_v11f1p_sd15_depth",
+ "depth_leres": "checkpoints/control_v11f1p_sd15_depth",
+ "depth_leres++": "checkpoints/control_v11f1p_sd15_depth",
+ "depth_zoe": "checkpoints/control_v11f1p_sd15_depth",
+ "canny": "checkpoints/control_v11p_sd15_canny",
+ "openpose": "checkpoints/control_v11p_sd15_openpose",
+ "openpose_face": "checkpoints/control_v11p_sd15_openpose",
+ "openpose_faceonly": "checkpoints/control_v11p_sd15_openpose",
+ "openpose_full": "checkpoints/control_v11p_sd15_openpose",
+ "openpose_hand": "checkpoints/control_v11p_sd15_openpose",
+ "normal_bae": "checkpoints/control_v11p_sd15_normalbae"
+ }
+}
+
+# Positive and negative prompts for generation
+POS_PROMPT = " ,best quality, extremely detailed, HD, ultra-realistic, 8K, HQ, masterpiece, trending on artstation, art, smooth"
+NEG_PROMPT = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, deformed body, bloated, ugly, unrealistic"
+
+def get_args():
+ """Parse command-line arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--jsonl_path", type=str, default=None, help="Path to JSONL file for batch processing")
+ parser.add_argument("--prompt", type=str, default=None, help="Text description of target video (used for single video mode)")
+ parser.add_argument("--video_path", type=str, default=None, help="Path to a source video (used for single video mode)")
+ parser.add_argument("--output_path", type=str, default="./outputs", help="Directory for output videos")
+ parser.add_argument("--condition", type=str, default="depth", help="Condition of structure sequence")
+ parser.add_argument("--video_length", type=int, default=15, help="Length of synthesized video")
+ parser.add_argument("--height", type=int, default=512, help="Height of synthesized video, must be a multiple of 32")
+ parser.add_argument("--width", type=int, default=512, help="Width of synthesized video, must be a multiple of 32")
+ parser.add_argument("--smoother_steps", nargs='+', default=[19, 20], type=int, help="Timesteps for interleaved-frame smoother")
+ parser.add_argument("--is_long_video", action='store_true', help="Use hierarchical sampler for long videos")
+ parser.add_argument("--seed", type=int, default=42, help="Random seed for generator")
+ parser.add_argument("--version", type=str, default='v10', choices=["v10", "v11"], help="ControlNet version")
+ parser.add_argument("--frame_rate", type=int, default=None, help="Frame rate of input video (default computed from video length)")
+ parser.add_argument("--temp_video_name", type=str, default=None, help="Default video name for single video mode")
+
+ args = parser.parse_args()
+ return args
+
+def process_video(prompt, video_path, output_path, condition, video_length, height, width, smoother_steps,
+ is_long_video, seed, version, frame_rate, temp_video_name, pipe, generator):
+ """Process a single video with the given parameters."""
+ # Ensure output directory exists
+ os.makedirs(output_path, exist_ok=True)
+
+ # Adjust height and width to be multiples of 32
+ height = (height // 32) * 32
+ width = (width // 32) * 32
+
+ # Step 1: Read the video
+ video = read_video(video_path=video_path, video_length=video_length, width=width, height=height, frame_rate=frame_rate)
+ original_pixels = rearrange(video, "(b f) c h w -> b c f h w", b=1)
+ save_videos_grid(original_pixels, os.path.join(output_path, f"source_{temp_video_name}"), rescale=True)
+
+ # Step 2: Parse video to conditional frames
+ processor = Processor(condition)
+ t2i_transform = torchvision.transforms.ToPILImage()
+ pil_annotation = [processor(t2i_transform(frame), to_pil=True) for frame in video]
+ video_cond = [np.array(p).astype(np.uint8) for p in pil_annotation]
+ imageio.mimsave(os.path.join(output_path, f"{condition}_condition_{temp_video_name}"), video_cond, fps=8)
+
+ # Free up memory
+ del processor
+ torch.cuda.empty_cache()
+
+ # Step 3: Inference
+ if is_long_video:
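+        # Hierarchical sampler for long videos: process the clip in windows of about sqrt(video_length) frames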
+ window_size = int(np.sqrt(video_length))
+ sample = pipe.generate_long_video(
+ prompt + POS_PROMPT, video_length=video_length, frames=pil_annotation,
+ num_inference_steps=50, smooth_steps=smoother_steps, window_size=window_size,
+ generator=generator, guidance_scale=12.5, negative_prompt=NEG_PROMPT,
+ width=width, height=height
+ ).videos
+ else:
+ sample = pipe(
+ prompt + POS_PROMPT, video_length=video_length, frames=pil_annotation,
+ num_inference_steps=50, smooth_steps=smoother_steps,
+ generator=generator, guidance_scale=12.5, negative_prompt=NEG_PROMPT,
+ width=width, height=height
+ ).videos
+
+ # Save the generated video
+ save_videos_grid(sample, os.path.join(output_path, temp_video_name))
+
+def main():
+ """Main function to handle both single and batch video processing."""
+ args = get_args()
+
+ # Load models (shared across all videos)
+ controlnet_dict = controlnet_dict_version[args.version]
+ tokenizer = CLIPTokenizer.from_pretrained(sd_path, subfolder="tokenizer")
+ text_encoder = CLIPTextModel.from_pretrained(sd_path, subfolder="text_encoder").to(dtype=torch.float16)
+ vae = AutoencoderKL.from_pretrained(sd_path, subfolder="vae").to(dtype=torch.float16)
+ unet = UNet3DConditionModel.from_pretrained_2d(sd_path, subfolder="unet").to(dtype=torch.float16)
+ controlnet = ControlNetModel3D.from_pretrained_2d(controlnet_dict[args.condition]).to(dtype=torch.float16)
+ interpolater = IFNet(ckpt_path=inter_path).to(dtype=torch.float16)
+ scheduler = DDIMScheduler.from_pretrained(sd_path, subfolder="scheduler")
+
+ pipe = ControlVideoPipeline(
+ vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
+ controlnet=controlnet, interpolater=interpolater, scheduler=scheduler,
+ )
+ pipe.enable_vae_slicing()
+ pipe.enable_xformers_memory_efficient_attention()
+ pipe.to(device)
+
+ generator = torch.Generator(device="cuda")
+ generator.manual_seed(args.seed)
+
+ if args.jsonl_path:
+ # Batch processing mode
+ with open(args.jsonl_path, 'r') as f:
+ for line in f:
+ try:
+ data = json.loads(line.strip())
+ prompt = data['edit_prompt']
+ video_filename = data['video']
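+                    # NOTE: hardcoded dataset root; change this to your own source-video directory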
+ video_path = os.path.join('/home/wangjuntong/video_editing_dataset/all_sourse/', video_filename)
+
+ # Process the video with the extracted parameters
+ process_video(
+ prompt=prompt,
+ video_path=video_path,
+ output_path=args.output_path,
+ condition=args.condition,
+ video_length=args.video_length,
+ height=args.height,
+ width=args.width,
+ smoother_steps=args.smoother_steps,
+ is_long_video=args.is_long_video,
+ seed=args.seed,
+ version=args.version,
+ frame_rate=args.frame_rate,
+ temp_video_name=video_filename, # Output name matches input video name
+ pipe=pipe,
+ generator=generator
+ )
+ print(f"Processed video: {video_filename}")
+ except Exception as e:
+ print(f"Error processing line '{line.strip()}': {e}")
+ else:
+ # Single video processing mode
+ if not args.prompt or not args.video_path:
+ raise ValueError("For single video mode, --prompt and --video_path are required.")
+ temp_video_name = args.temp_video_name if args.temp_video_name else "output.mp4"
+ process_video(
+ prompt=args.prompt,
+ video_path=args.video_path,
+ output_path=args.output_path,
+ condition=args.condition,
+ video_length=args.video_length,
+ height=args.height,
+ width=args.width,
+ smoother_steps=args.smoother_steps,
+ is_long_video=args.is_long_video,
+ seed=args.seed,
+ version=args.version,
+ frame_rate=args.frame_rate,
+ temp_video_name=temp_video_name,
+ pipe=pipe,
+ generator=generator
+ )
+ print(f"Processed single video: {temp_video_name}")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/ControlVideo-master/inference.sh b/ControlVideo-master/inference.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d2aa3b76dfda65865491bb51e0e4aeeb1e68a175
--- /dev/null
+++ b/ControlVideo-master/inference.sh
@@ -0,0 +1,12 @@
+python inference.py \
+ --prompt "A striking mallard floats effortlessly on the sparkling pond." \
+ --condition "depth_midas" \
+ --video_path "data/mallard-water.mp4" \
+ --output_path "outputs/" \
+ --video_length 15 \
+ --smoother_steps 19 20 \
+ --width 512 \
+ --height 512 \
+ --frame_rate 2 \
+ --version v10 \
+ # --is_long_video
\ No newline at end of file
diff --git a/ControlVideo-master/models/RIFE/IFNet_HDv3.py b/ControlVideo-master/models/RIFE/IFNet_HDv3.py
new file mode 100644
index 0000000000000000000000000000000000000000..d57f0a2f0889fec5d68c52bf99bf2dbd91150381
--- /dev/null
+++ b/ControlVideo-master/models/RIFE/IFNet_HDv3.py
@@ -0,0 +1,130 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from diffusers import ModelMixin
+
+from .warplayer import warp
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+ padding=padding, dilation=dilation, bias=True),
+ nn.PReLU(out_planes)
+ )
+
+def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+ padding=padding, dilation=dilation, bias=False),
+ nn.BatchNorm2d(out_planes),
+ nn.PReLU(out_planes)
+ )
+
+def convert(param):
+    # Strip the "module." prefix that nn.DataParallel adds to checkpoint keys
+    return {
+        k.replace("module.", ""): v
+        for k, v in param.items()
+        if "module." in k
+    }
+
+class IFBlock(nn.Module):
+ def __init__(self, in_planes, c=64):
+ super(IFBlock, self).__init__()
+ self.conv0 = nn.Sequential(
+ conv(in_planes, c//2, 3, 2, 1),
+ conv(c//2, c, 3, 2, 1),
+ )
+ self.convblock0 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock1 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock2 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.convblock3 = nn.Sequential(
+ conv(c, c),
+ conv(c, c)
+ )
+ self.conv1 = nn.Sequential(
+ nn.ConvTranspose2d(c, c//2, 4, 2, 1),
+ nn.PReLU(c//2),
+ nn.ConvTranspose2d(c//2, 4, 4, 2, 1),
+ )
+ self.conv2 = nn.Sequential(
+ nn.ConvTranspose2d(c, c//2, 4, 2, 1),
+ nn.PReLU(c//2),
+ nn.ConvTranspose2d(c//2, 1, 4, 2, 1),
+ )
+
+ def forward(self, x, flow, scale=1):
+        x = F.interpolate(x, scale_factor=1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False)
+        flow = F.interpolate(flow, scale_factor=1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * 1. / scale
+ feat = self.conv0(torch.cat((x, flow), 1))
+ feat = self.convblock0(feat) + feat
+ feat = self.convblock1(feat) + feat
+ feat = self.convblock2(feat) + feat
+ feat = self.convblock3(feat) + feat
+ flow = self.conv1(feat)
+ mask = self.conv2(feat)
+ flow = F.interpolate(flow, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * scale
+ mask = F.interpolate(mask, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False)
+ return flow, mask
+
+class IFNet(ModelMixin):
+ def __init__(self, ckpt_path="checkpoints/flownet.pkl"):
+ super(IFNet, self).__init__()
+ self.block0 = IFBlock(7+4, c=90)
+ self.block1 = IFBlock(7+4, c=90)
+ self.block2 = IFBlock(7+4, c=90)
+ self.block_tea = IFBlock(10+4, c=90)
+ if ckpt_path is not None:
+            self.load_state_dict(convert(torch.load(ckpt_path, map_location='cpu')))
+
+ def inference(self, img0, img1, scale=1.0):
+ imgs = torch.cat((img0, img1), 1)
+ scale_list = [4/scale, 2/scale, 1/scale]
+ flow, mask, merged = self.forward(imgs, scale_list)
+ return merged[2]
+
+    def forward(self, x, scale_list=[4, 2, 1], training=False):
+        if not training:
+            # Inference mode: x is img0 and img1 concatenated along the channel dimension
+            channel = x.shape[1] // 2
+            img0 = x[:, :channel]
+            img1 = x[:, channel:]
+ flow_list = []
+ merged = []
+ mask_list = []
+ warped_img0 = img0
+ warped_img1 = img1
+ flow = (x[:, :4]).detach() * 0
+ mask = (x[:, :1]).detach() * 0
+ loss_cons = 0
+ block = [self.block0, self.block1, self.block2]
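+        # Coarse-to-fine refinement: update the bidirectional flow and fusion mask over three scales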
+ for i in range(3):
+ f0, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], mask), 1), flow, scale=scale_list[i])
+ f1, m1 = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i])
+ flow = flow + (f0 + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2
+ mask = mask + (m0 + (-m1)) / 2
+ mask_list.append(mask)
+ flow_list.append(flow)
+ warped_img0 = warp(img0, flow[:, :2])
+ warped_img1 = warp(img1, flow[:, 2:4])
+ merged.append((warped_img0, warped_img1))
+ '''
+ c0 = self.contextnet(img0, flow[:, :2])
+ c1 = self.contextnet(img1, flow[:, 2:4])
+ tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
+ res = tmp[:, 1:4] * 2 - 1
+ '''
+ for i in range(3):
+ mask_list[i] = torch.sigmoid(mask_list[i])
+ merged[i] = merged[i][0] * mask_list[i] + merged[i][1] * (1 - mask_list[i])
+ # merged[i] = torch.clamp(merged[i] + res, 0, 1)
+ return flow_list, mask_list[2], merged
diff --git a/ControlVideo-master/models/RIFE/__pycache__/IFNet_HDv3.cpython-310.pyc b/ControlVideo-master/models/RIFE/__pycache__/IFNet_HDv3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83b5e831b675d6f490dd672a66dd75e10acb41e4
Binary files /dev/null and b/ControlVideo-master/models/RIFE/__pycache__/IFNet_HDv3.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/RIFE/__pycache__/warplayer.cpython-310.pyc b/ControlVideo-master/models/RIFE/__pycache__/warplayer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37d20fb45e92519dfaeb1f64ed09bff9998de24a
Binary files /dev/null and b/ControlVideo-master/models/RIFE/__pycache__/warplayer.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/RIFE/warplayer.py b/ControlVideo-master/models/RIFE/warplayer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7b2617fd3aa36e116b34b60f3ab4071d8cfd355
--- /dev/null
+++ b/ControlVideo-master/models/RIFE/warplayer.py
@@ -0,0 +1,22 @@
+import torch
+import torch.nn as nn
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+backwarp_tenGrid = {}
+
+
+def warp(tenInput, tenFlow):
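+    # Backward-warp tenInput by tenFlow via grid_sample, caching the base sampling grid per (device, size)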
+ k = (str(tenFlow.device), str(tenFlow.size()))
+ if k not in backwarp_tenGrid:
+ tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view(
+ 1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
+ tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view(
+ 1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
+ backwarp_tenGrid[k] = torch.cat(
+ [tenHorizontal, tenVertical], 1).to(device)
+
+ tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
+ tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
+
+ g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1).to(dtype=tenInput.dtype)
+ return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True)
diff --git a/ControlVideo-master/models/__pycache__/attention.cpython-310.pyc b/ControlVideo-master/models/__pycache__/attention.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b3b3403a0e62f5bd10df2fe9c36201177d525dd
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/attention.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/controlnet.cpython-310.pyc b/ControlVideo-master/models/__pycache__/controlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7615ceb2ea0cb6e0e313da24d4b3db32bbfbd60b
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/controlnet.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/controlnet_attention.cpython-310.pyc b/ControlVideo-master/models/__pycache__/controlnet_attention.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0c602df5313e4b48f98047016454bc2a3677a9f
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/controlnet_attention.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/controlnet_unet_blocks.cpython-310.pyc b/ControlVideo-master/models/__pycache__/controlnet_unet_blocks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..596a9ca698518b6d80f6489eddc3282723cd03d2
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/controlnet_unet_blocks.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/pipeline_controlvideo.cpython-310.pyc b/ControlVideo-master/models/__pycache__/pipeline_controlvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4703821c936c293be8da511c4bd2c949d80d78c8
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/pipeline_controlvideo.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/resnet.cpython-310.pyc b/ControlVideo-master/models/__pycache__/resnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76b174bee9876233a711345be6580a6498942579
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/resnet.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/unet.cpython-310.pyc b/ControlVideo-master/models/__pycache__/unet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76e4428fbec2c307e140b0c571182cb5eb3e99cc
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/unet.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/unet_blocks.cpython-310.pyc b/ControlVideo-master/models/__pycache__/unet_blocks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdd610582d6defb3df78668fbf1d95b3439300be
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/unet_blocks.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/__pycache__/util.cpython-310.pyc b/ControlVideo-master/models/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..508bf44f9b518171cbfd347eb342ee3ece765c38
Binary files /dev/null and b/ControlVideo-master/models/__pycache__/util.cpython-310.pyc differ
diff --git a/ControlVideo-master/models/attention.py b/ControlVideo-master/models/attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..955f8a768b843269fc300ae32dc9fbdb76787c75
--- /dev/null
+++ b/ControlVideo-master/models/attention.py
@@ -0,0 +1,478 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py
+
+from dataclasses import dataclass
+from typing import Optional, Callable
+import math
+import torch
+import torch.nn.functional as F
+from torch import nn
+from positional_encodings.torch_encodings import PositionalEncoding2D
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers import ModelMixin
+from diffusers.utils import BaseOutput
+from diffusers.utils.import_utils import is_xformers_available
+from diffusers.models.attention import CrossAttention, FeedForward, AdaLayerNorm
+from einops import rearrange, repeat
+
+
+@dataclass
+class Transformer3DModelOutput(BaseOutput):
+ sample: torch.FloatTensor
+
+
+if is_xformers_available():
+ import xformers
+ import xformers.ops
+else:
+ xformers = None
+
+
+class Transformer3DModel(ModelMixin, ConfigMixin):
+ @register_to_config
+ def __init__(
+ self,
+ num_attention_heads: int = 16,
+ attention_head_dim: int = 88,
+ in_channels: Optional[int] = None,
+ num_layers: int = 1,
+ dropout: float = 0.0,
+ norm_num_groups: int = 32,
+ cross_attention_dim: Optional[int] = None,
+ attention_bias: bool = False,
+ activation_fn: str = "geglu",
+ num_embeds_ada_norm: Optional[int] = None,
+ use_linear_projection: bool = False,
+ only_cross_attention: bool = False,
+ upcast_attention: bool = False,
+ ):
+ super().__init__()
+ self.use_linear_projection = use_linear_projection
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_dim = attention_head_dim
+ inner_dim = num_attention_heads * attention_head_dim
+
+ # Define input layers
+ self.in_channels = in_channels
+
+ self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
+ if use_linear_projection:
+ self.proj_in = nn.Linear(in_channels, inner_dim)
+ else:
+ self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
+
+ # Define transformers blocks
+ self.transformer_blocks = nn.ModuleList(
+ [
+ BasicTransformerBlock(
+ inner_dim,
+ num_attention_heads,
+ attention_head_dim,
+ dropout=dropout,
+ cross_attention_dim=cross_attention_dim,
+ activation_fn=activation_fn,
+ num_embeds_ada_norm=num_embeds_ada_norm,
+ attention_bias=attention_bias,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ for d in range(num_layers)
+ ]
+ )
+
+        # Define output layers
+ if use_linear_projection:
+            self.proj_out = nn.Linear(inner_dim, in_channels)  # project back to the input channel count
+ else:
+ self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
+
+    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True,
+                inter_frame=False):
+ # Input
+ assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
+ video_length = hidden_states.shape[2]
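+        # Fold frames into the batch dimension and repeat the text embeddings for each frame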
+ hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
+ encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
+
+        batch, channel, height, width = hidden_states.shape
+ residual = hidden_states
+
+ hidden_states = self.norm(hidden_states)
+ if not self.use_linear_projection:
+ hidden_states = self.proj_in(hidden_states)
+ inner_dim = hidden_states.shape[1]
+            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
+ else:
+ inner_dim = hidden_states.shape[1]
+            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
+ hidden_states = self.proj_in(hidden_states)
+
+ # Blocks
+ for block in self.transformer_blocks:
+ hidden_states = block(
+ hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ timestep=timestep,
+ video_length=video_length,
+ inter_frame=inter_frame
+ )
+
+ # Output
+ if not self.use_linear_projection:
+ hidden_states = (
+                hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
+ )
+ hidden_states = self.proj_out(hidden_states)
+ else:
+ hidden_states = self.proj_out(hidden_states)
+ hidden_states = (
+                hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
+ )
+
+ output = hidden_states + residual
+
+ output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
+ if not return_dict:
+ return (output,)
+
+ return Transformer3DModelOutput(sample=output)
+
+
+class BasicTransformerBlock(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ dropout=0.0,
+ cross_attention_dim: Optional[int] = None,
+ activation_fn: str = "geglu",
+ num_embeds_ada_norm: Optional[int] = None,
+ attention_bias: bool = False,
+ only_cross_attention: bool = False,
+ upcast_attention: bool = False,
+ ):
+ super().__init__()
+ self.only_cross_attention = only_cross_attention
+ self.use_ada_layer_norm = num_embeds_ada_norm is not None
+
+        # Fully cross-frame attention (each frame attends to all frames)
+ self.attn1 = FullyFrameAttention(
+ query_dim=dim,
+ heads=num_attention_heads,
+ dim_head=attention_head_dim,
+ dropout=dropout,
+ bias=attention_bias,
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
+ upcast_attention=upcast_attention,
+ )
+
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
+
+ # Cross-Attn
+ if cross_attention_dim is not None:
+ self.attn2 = CrossAttention(
+ query_dim=dim,
+ cross_attention_dim=cross_attention_dim,
+ heads=num_attention_heads,
+ dim_head=attention_head_dim,
+ dropout=dropout,
+ bias=attention_bias,
+ upcast_attention=upcast_attention,
+ )
+ else:
+ self.attn2 = None
+
+ if cross_attention_dim is not None:
+ self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
+ else:
+ self.norm2 = None
+
+ # Feed-forward
+ self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)
+ self.norm3 = nn.LayerNorm(dim)
+
+ def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None):
+        if not is_xformers_available():
+            raise ModuleNotFoundError(
+                "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
+                " xformers",
+                name="xformers",
+            )
+ elif not torch.cuda.is_available():
+ raise ValueError(
+ "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is only"
+ " available for GPU "
+ )
+ else:
+ try:
+ # Make sure we can run the memory efficient attention
+ _ = xformers.ops.memory_efficient_attention(
+ torch.randn((1, 2, 40), device="cuda"),
+ torch.randn((1, 2, 40), device="cuda"),
+ torch.randn((1, 2, 40), device="cuda"),
+ )
+            except Exception:
+                # let any failure from the test op propagate unchanged
+                raise
+ self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+ if self.attn2 is not None:
+ self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+
+    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None, video_length=None,
+                inter_frame=False):
+        # Fully cross-frame attention
+ norm_hidden_states = (
+ self.norm1(hidden_states, timestep) if self.use_ada_layer_norm else self.norm1(hidden_states)
+ )
+
+ if self.only_cross_attention:
+ hidden_states = (
+ self.attn1(norm_hidden_states, encoder_hidden_states, attention_mask=attention_mask, inter_frame=inter_frame) + hidden_states
+ )
+ else:
+ hidden_states = self.attn1(norm_hidden_states, attention_mask=attention_mask, video_length=video_length, inter_frame=inter_frame) + hidden_states
+
+ if self.attn2 is not None:
+ # Cross-Attention
+ norm_hidden_states = (
+ self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
+ )
+ hidden_states = (
+ self.attn2(
+ norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask
+ )
+ + hidden_states
+ )
+
+ # Feed-forward
+ hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
+
+ return hidden_states
+
+
+class FullyFrameAttention(nn.Module):
+ r"""
+    An attention layer that attends jointly across all frames of a video clip.
+
+ Parameters:
+ query_dim (`int`): The number of channels in the query.
+ cross_attention_dim (`int`, *optional*):
+ The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
+ heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
+ dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+ bias (`bool`, *optional*, defaults to False):
+ Set to `True` for the query, key, and value linear layers to contain a bias parameter.
+ """
+
+ def __init__(
+ self,
+ query_dim: int,
+ cross_attention_dim: Optional[int] = None,
+ heads: int = 8,
+ dim_head: int = 64,
+ dropout: float = 0.0,
+ bias=False,
+ upcast_attention: bool = False,
+ upcast_softmax: bool = False,
+ added_kv_proj_dim: Optional[int] = None,
+ norm_num_groups: Optional[int] = None,
+ ):
+ super().__init__()
+ inner_dim = dim_head * heads
+ cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
+ self.upcast_attention = upcast_attention
+ self.upcast_softmax = upcast_softmax
+
+ self.scale = dim_head**-0.5
+
+ self.heads = heads
+ # for slice_size > 0 the attention score computation
+ # is split across the batch axis to save memory
+ # You can set slice_size with `set_attention_slice`
+ self.sliceable_head_dim = heads
+ self._slice_size = None
+ self._use_memory_efficient_attention_xformers = False
+ self.added_kv_proj_dim = added_kv_proj_dim
+
+ if norm_num_groups is not None:
+ self.group_norm = nn.GroupNorm(num_channels=inner_dim, num_groups=norm_num_groups, eps=1e-5, affine=True)
+ else:
+ self.group_norm = None
+
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)
+ self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
+ self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
+
+ if self.added_kv_proj_dim is not None:
+ self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
+ self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
+
+ self.to_out = nn.ModuleList([])
+ self.to_out.append(nn.Linear(inner_dim, query_dim))
+ self.to_out.append(nn.Dropout(dropout))
+
+ def reshape_heads_to_batch_dim(self, tensor):
+ batch_size, seq_len, dim = tensor.shape
+ head_size = self.heads
+ tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
+ return tensor
+
+ def reshape_batch_dim_to_heads(self, tensor):
+ batch_size, seq_len, dim = tensor.shape
+ head_size = self.heads
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
+ return tensor
+
+ def set_attention_slice(self, slice_size):
+ if slice_size is not None and slice_size > self.sliceable_head_dim:
+ raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")
+
+ self._slice_size = slice_size
+
+ def _attention(self, query, key, value, attention_mask=None):
+ if self.upcast_attention:
+ query = query.float()
+ key = key.float()
+
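+        # baddbmm with beta=0 ignores the uninitialized `input` tensor and returns
+        # alpha * (query @ key^T), i.e. the scaled dot-product attention scores.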
+ attention_scores = torch.baddbmm(
+ torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
+ query,
+ key.transpose(-1, -2),
+ beta=0,
+ alpha=self.scale,
+ )
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ if self.upcast_softmax:
+ attention_scores = attention_scores.float()
+
+ attention_probs = attention_scores.softmax(dim=-1)
+
+ # cast back to the original dtype
+ attention_probs = attention_probs.to(value.dtype)
+
+ # compute attention output
+ hidden_states = torch.bmm(attention_probs, value)
+
+ # reshape hidden_states
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def _sliced_attention(self, query, key, value, sequence_length, dim, attention_mask):
+ batch_size_attention = query.shape[0]
+ hidden_states = torch.zeros(
+ (batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
+ )
+ slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
+ for i in range(hidden_states.shape[0] // slice_size):
+ start_idx = i * slice_size
+ end_idx = (i + 1) * slice_size
+
+ query_slice = query[start_idx:end_idx]
+ key_slice = key[start_idx:end_idx]
+
+ if self.upcast_attention:
+ query_slice = query_slice.float()
+ key_slice = key_slice.float()
+
+ attn_slice = torch.baddbmm(
+ torch.empty(slice_size, query.shape[1], key.shape[1], dtype=query_slice.dtype, device=query.device),
+ query_slice,
+ key_slice.transpose(-1, -2),
+ beta=0,
+ alpha=self.scale,
+ )
+
+ if attention_mask is not None:
+ attn_slice = attn_slice + attention_mask[start_idx:end_idx]
+
+ if self.upcast_softmax:
+ attn_slice = attn_slice.float()
+
+ attn_slice = attn_slice.softmax(dim=-1)
+
+ # cast back to the original dtype
+ attn_slice = attn_slice.to(value.dtype)
+ attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])
+
+ hidden_states[start_idx:end_idx] = attn_slice
+
+ # reshape hidden_states
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
+ # TODO attention_mask
+ query = query.contiguous()
+ key = key.contiguous()
+ value = value.contiguous()
+ hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None, inter_frame=False):
+ batch_size, sequence_length, _ = hidden_states.shape
+
+ if self.group_norm is not None:
+ hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = self.to_q(hidden_states) # (bf) x d(hw) x c
+ dim = query.shape[-1]
+
+        # Concatenate all frames along the token axis so each query can attend
+        # to every spatial position in every frame.
+ query = rearrange(query, "(b f) d c -> b (f d) c", f=video_length)
+
+ query = self.reshape_heads_to_batch_dim(query)
+
+ if self.added_kv_proj_dim is not None:
+ raise NotImplementedError
+
+ encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
+ key = self.to_k(encoder_hidden_states)
+ value = self.to_v(encoder_hidden_states)
+
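+        # inter_frame restricts the key/value context to just the first and last
+        # frames of the clip; otherwise keys/values span all frames.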
+ if inter_frame:
+ key = rearrange(key, "(b f) d c -> b f d c", f=video_length)[:, [0, -1]]
+ value = rearrange(value, "(b f) d c -> b f d c", f=video_length)[:, [0, -1]]
+ key = rearrange(key, "b f d c -> b (f d) c",)
+ value = rearrange(value, "b f d c -> b (f d) c")
+ else:
+ # All frames
+ key = rearrange(key, "(b f) d c -> b (f d) c", f=video_length)
+ value = rearrange(value, "(b f) d c -> b (f d) c", f=video_length)
+
+ key = self.reshape_heads_to_batch_dim(key)
+ value = self.reshape_heads_to_batch_dim(value)
+
+ if attention_mask is not None:
+ if attention_mask.shape[-1] != query.shape[1]:
+ target_length = query.shape[1]
+                attention_mask = F.pad(attention_mask, (0, target_length - attention_mask.shape[-1]), value=0.0)
+ attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
+
+ # attention, what we cannot get enough of
+ if self._use_memory_efficient_attention_xformers:
+ hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
+ # Some versions of xformers return output in fp32, cast it back to the dtype of the input
+ hidden_states = hidden_states.to(query.dtype)
+ else:
+ if self._slice_size is None or query.shape[0] // self._slice_size == 1:
+ hidden_states = self._attention(query, key, value, attention_mask)
+ else:
+ hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)
+
+ # linear proj
+ hidden_states = self.to_out[0](hidden_states)
+
+ # dropout
+ hidden_states = self.to_out[1](hidden_states)
+
+        # Unfold the frame axis back out of the token axis
+ hidden_states = rearrange(hidden_states, "b (f d) c -> (b f) d c", f=video_length)
+ return hidden_states
diff --git a/ControlVideo-master/models/controlnet.py b/ControlVideo-master/models/controlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cb510a767471ba718521eb475b8db43463963f6
--- /dev/null
+++ b/ControlVideo-master/models/controlnet.py
@@ -0,0 +1,605 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+import os
+import json
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.utils import BaseOutput, logging
+from diffusers.models.embeddings import TimestepEmbedding, Timesteps
+from diffusers import ModelMixin
+from .controlnet_unet_blocks import (
+ CrossAttnDownBlock3D,
+ DownBlock3D,
+ UNetMidBlock3DCrossAttn,
+ get_down_block,
+)
+from .resnet import InflatedConv3d
+
+from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.cross_attention import AttnProcessor
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class ControlNetOutput(BaseOutput):
+ down_block_res_samples: Tuple[torch.Tensor]
+ mid_block_res_sample: torch.Tensor
+
+
+class ControlNetConditioningEmbedding(nn.Module):
+ """
+ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
+ [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
+ training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
+ convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
+ model) to encode image-space conditions ... into feature maps ..."
+ """
+
+ def __init__(
+ self,
+ conditioning_embedding_channels: int,
+ conditioning_channels: int = 3,
+ block_out_channels: Tuple[int] = (16, 32, 96, 256),
+ ):
+ super().__init__()
+
+ self.conv_in = InflatedConv3d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
+
+ self.blocks = nn.ModuleList([])
+
+ for i in range(len(block_out_channels) - 1):
+ channel_in = block_out_channels[i]
+ channel_out = block_out_channels[i + 1]
+ self.blocks.append(InflatedConv3d(channel_in, channel_in, kernel_size=3, padding=1))
+ self.blocks.append(InflatedConv3d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
+
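+        # conv_out is zero-initialized so the conditioning branch contributes
+        # nothing at the start of training (the standard ControlNet trick).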
+ self.conv_out = zero_module(
+ InflatedConv3d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
+ )
+
+ def forward(self, conditioning):
+ embedding = self.conv_in(conditioning)
+ embedding = F.silu(embedding)
+
+ for block in self.blocks:
+ embedding = block(embedding)
+ embedding = F.silu(embedding)
+
+ embedding = self.conv_out(embedding)
+
+ return embedding
+
+
+class ControlNetModel3D(ModelMixin, ConfigMixin):
+ _supports_gradient_checkpointing = True
+
+ @register_to_config
+ def __init__(
+ self,
+ in_channels: int = 4,
+ flip_sin_to_cos: bool = True,
+ freq_shift: int = 0,
+ down_block_types: Tuple[str] = (
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "DownBlock3D",
+ ),
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+ layers_per_block: int = 2,
+ downsample_padding: int = 1,
+ mid_block_scale_factor: float = 1,
+ act_fn: str = "silu",
+ norm_num_groups: Optional[int] = 32,
+ norm_eps: float = 1e-5,
+ cross_attention_dim: int = 1280,
+ attention_head_dim: Union[int, Tuple[int]] = 8,
+ dual_cross_attention: bool = False,
+ use_linear_projection: bool = False,
+ class_embed_type: Optional[str] = None,
+ num_class_embeds: Optional[int] = None,
+ upcast_attention: bool = False,
+ resnet_time_scale_shift: str = "default",
+ projection_class_embeddings_input_dim: Optional[int] = None,
+ controlnet_conditioning_channel_order: str = "rgb",
+ conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
+ ):
+ super().__init__()
+
+ # Check inputs
+ if len(block_out_channels) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
+ )
+
+ # input
+ conv_in_kernel = 3
+ conv_in_padding = (conv_in_kernel - 1) // 2
+ self.conv_in = InflatedConv3d(
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
+ )
+
+ # time
+ time_embed_dim = block_out_channels[0] * 4
+
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
+ timestep_input_dim = block_out_channels[0]
+
+ self.time_embedding = TimestepEmbedding(
+ timestep_input_dim,
+ time_embed_dim,
+ act_fn=act_fn,
+ )
+
+ # class embedding
+ if class_embed_type is None and num_class_embeds is not None:
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
+ elif class_embed_type == "timestep":
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
+ elif class_embed_type == "identity":
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
+ elif class_embed_type == "projection":
+ if projection_class_embeddings_input_dim is None:
+ raise ValueError(
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
+ )
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
+ # 2. it projects from an arbitrary input dimension.
+ #
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
+ else:
+ self.class_embedding = None
+
+ # control net conditioning embedding
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
+ conditioning_embedding_channels=block_out_channels[0],
+ block_out_channels=conditioning_embedding_out_channels,
+ )
+
+ self.down_blocks = nn.ModuleList([])
+ self.controlnet_down_blocks = nn.ModuleList([])
+
+ if isinstance(only_cross_attention, bool):
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
+
+ if isinstance(attention_head_dim, int):
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
+
+ # down
+ output_channel = block_out_channels[0]
+
+ controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
+ controlnet_block = zero_module(controlnet_block)
+ self.controlnet_down_blocks.append(controlnet_block)
+
+ for i, down_block_type in enumerate(down_block_types):
+ input_channel = output_channel
+ output_channel = block_out_channels[i]
+ is_final_block = i == len(block_out_channels) - 1
+
+ down_block = get_down_block(
+ down_block_type,
+ num_layers=layers_per_block,
+ in_channels=input_channel,
+ out_channels=output_channel,
+ temb_channels=time_embed_dim,
+ add_downsample=not is_final_block,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attention_head_dim[i],
+ downsample_padding=downsample_padding,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention[i],
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ self.down_blocks.append(down_block)
+
+ for _ in range(layers_per_block):
+ controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
+ controlnet_block = zero_module(controlnet_block)
+ self.controlnet_down_blocks.append(controlnet_block)
+
+ if not is_final_block:
+ controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
+ controlnet_block = zero_module(controlnet_block)
+ self.controlnet_down_blocks.append(controlnet_block)
+
+ # mid
+ mid_block_channel = block_out_channels[-1]
+
+ controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)
+ controlnet_block = zero_module(controlnet_block)
+ self.controlnet_mid_block = controlnet_block
+
+ # mid
+ self.mid_block = UNetMidBlock3DCrossAttn(
+ in_channels=block_out_channels[-1],
+ temb_channels=time_embed_dim,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ output_scale_factor=mid_block_scale_factor,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attention_head_dim[-1],
+ resnet_groups=norm_num_groups,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ upcast_attention=upcast_attention,
+ )
+
+ @classmethod
+ def from_unet(
+ cls,
+ unet: UNet2DConditionModel,
+ controlnet_conditioning_channel_order: str = "rgb",
+ conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
+ load_weights_from_unet: bool = True,
+ ):
+ r"""
+        Instantiate a ControlNetModel3D from a UNet2DConditionModel.
+
+ Parameters:
+ unet (`UNet2DConditionModel`):
+                UNet model whose weights are copied to the ControlNet. Note that all configuration options are also
+ copied where applicable.
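+
+        Example (a minimal sketch; the checkpoint id below is illustrative):
+
+        ```py
+        unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+        controlnet = ControlNetModel3D.from_unet(unet)
+        ```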
+ """
+ controlnet = cls(
+ in_channels=unet.config.in_channels,
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
+ freq_shift=unet.config.freq_shift,
+ down_block_types=unet.config.down_block_types,
+ only_cross_attention=unet.config.only_cross_attention,
+ block_out_channels=unet.config.block_out_channels,
+ layers_per_block=unet.config.layers_per_block,
+ downsample_padding=unet.config.downsample_padding,
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
+ act_fn=unet.config.act_fn,
+ norm_num_groups=unet.config.norm_num_groups,
+ norm_eps=unet.config.norm_eps,
+ cross_attention_dim=unet.config.cross_attention_dim,
+ attention_head_dim=unet.config.attention_head_dim,
+ use_linear_projection=unet.config.use_linear_projection,
+ class_embed_type=unet.config.class_embed_type,
+ num_class_embeds=unet.config.num_class_embeds,
+ upcast_attention=unet.config.upcast_attention,
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
+ projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
+ )
+
+ if load_weights_from_unet:
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
+
+            if controlnet.class_embedding is not None:
+ controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
+
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
+
+ return controlnet
+
+ @property
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
+ def attn_processors(self) -> Dict[str, AttnProcessor]:
+ r"""
+ Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model,
+            indexed by their weight names.
+ """
+ # set recursively
+ processors = {}
+
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):
+ if hasattr(module, "set_processor"):
+ processors[f"{name}.processor"] = module.processor
+
+ for sub_name, child in module.named_children():
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+ return processors
+
+ for name, module in self.named_children():
+ fn_recursive_add_processors(name, module, processors)
+
+ return processors
+
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+ def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):
+ r"""
+        Parameters:
+            processor (`dict` of `AttnProcessor` or `AttnProcessor`):
+                The instantiated processor class, or a dictionary of processor classes to be set as the processor
+                of **all** `Attention` layers. If `processor` is a dict, its keys must define the path to the
+                corresponding cross attention processor; this is strongly recommended when setting trainable
+                attention processors.
+
+ """
+ count = len(self.attn_processors.keys())
+
+ if isinstance(processor, dict) and len(processor) != count:
+ raise ValueError(
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+ )
+
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+ if hasattr(module, "set_processor"):
+ if not isinstance(processor, dict):
+ module.set_processor(processor)
+ else:
+ module.set_processor(processor.pop(f"{name}.processor"))
+
+ for sub_name, child in module.named_children():
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+ for name, module in self.named_children():
+ fn_recursive_attn_processor(name, module, processor)
+
+ # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
+ def set_attention_slice(self, slice_size):
+ r"""
+ Enable sliced attention computation.
+
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+ Args:
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+ `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
+ must be a multiple of `slice_size`.
+ """
+ sliceable_head_dims = []
+
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
+ if hasattr(module, "set_attention_slice"):
+ sliceable_head_dims.append(module.sliceable_head_dim)
+
+ for child in module.children():
+ fn_recursive_retrieve_sliceable_dims(child)
+
+ # retrieve number of attention layers
+ for module in self.children():
+ fn_recursive_retrieve_sliceable_dims(module)
+
+ num_sliceable_layers = len(sliceable_head_dims)
+
+ if slice_size == "auto":
+ # half the attention head size is usually a good trade-off between
+ # speed and memory
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
+ elif slice_size == "max":
+ # make smallest slice possible
+ slice_size = num_sliceable_layers * [1]
+
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
+
+ if len(slice_size) != len(sliceable_head_dims):
+ raise ValueError(
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
+ )
+
+ for i in range(len(slice_size)):
+ size = slice_size[i]
+ dim = sliceable_head_dims[i]
+ if size is not None and size > dim:
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
+
+ # Recursively walk through all the children.
+ # Any children which exposes the set_attention_slice method
+ # gets the message
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
+ if hasattr(module, "set_attention_slice"):
+ module.set_attention_slice(slice_size.pop())
+
+ for child in module.children():
+ fn_recursive_set_attention_slice(child, slice_size)
+
+ reversed_slice_size = list(reversed(slice_size))
+ for module in self.children():
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D)):
+ module.gradient_checkpointing = value
+
+ def forward(
+ self,
+ sample: torch.FloatTensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: torch.Tensor,
+ controlnet_cond: torch.FloatTensor,
+ conditioning_scale: float = 1.0,
+ class_labels: Optional[torch.Tensor] = None,
+ timestep_cond: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ return_dict: bool = True,
+ ) -> Union[ControlNetOutput, Tuple]:
+ # check channel order
+ channel_order = self.config.controlnet_conditioning_channel_order
+
+ if channel_order == "rgb":
+ # in rgb order by default
+ ...
+ elif channel_order == "bgr":
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
+ else:
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
+
+ # prepare attention_mask
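+        # A {0,1} keep-mask becomes an additive bias: masked positions receive a
+        # large negative value and vanish after the attention softmax.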
+ if attention_mask is not None:
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+ attention_mask = attention_mask.unsqueeze(1)
+
+ # 1. time
+ timesteps = timestep
+ if not torch.is_tensor(timesteps):
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+ # This would be a good case for the `match` statement (Python 3.10+)
+ is_mps = sample.device.type == "mps"
+ if isinstance(timestep, float):
+ dtype = torch.float32 if is_mps else torch.float64
+ else:
+ dtype = torch.int32 if is_mps else torch.int64
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+ elif len(timesteps.shape) == 0:
+ timesteps = timesteps[None].to(sample.device)
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timesteps = timesteps.expand(sample.shape[0])
+
+ t_emb = self.time_proj(timesteps)
+
+ # timesteps does not contain any weights and will always return f32 tensors
+ # but time_embedding might actually be running in fp16. so we need to cast here.
+ # there might be better ways to encapsulate this.
+ t_emb = t_emb.to(dtype=self.dtype)
+
+ emb = self.time_embedding(t_emb, timestep_cond)
+
+ if self.class_embedding is not None:
+ if class_labels is None:
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+ if self.config.class_embed_type == "timestep":
+ class_labels = self.time_proj(class_labels)
+
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
+ emb = emb + class_emb
+
+ # 2. pre-process
+ sample = self.conv_in(sample)
+
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
+
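+        # The control signal is injected by simple addition right after conv_in,
+        # mirroring the 2D ControlNet design.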
+ sample += controlnet_cond
+
+ # 3. down
+ down_block_res_samples = (sample,)
+ for downsample_block in self.down_blocks:
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+ sample, res_samples = downsample_block(
+ hidden_states=sample,
+ temb=emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ )
+ else:
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+
+ down_block_res_samples += res_samples
+
+ # 4. mid
+ if self.mid_block is not None:
+ sample = self.mid_block(
+ sample,
+ emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ )
+
+ # 5. Control net blocks
+
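+        # Each down-block residual passes through its own zero-initialized 1x1
+        # conv (see zero_module below) before being handed to the host UNet.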
+ controlnet_down_block_res_samples = ()
+
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
+ down_block_res_sample = controlnet_block(down_block_res_sample)
+ controlnet_down_block_res_samples += (down_block_res_sample,)
+
+ down_block_res_samples = controlnet_down_block_res_samples
+
+ mid_block_res_sample = self.controlnet_mid_block(sample)
+
+ # 6. scaling
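+        # conditioning_scale uniformly weights every residual before it is added
+        # into the host UNet; 1.0 keeps full control strength.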
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
+ mid_block_res_sample *= conditioning_scale
+
+ if not return_dict:
+ return (down_block_res_samples, mid_block_res_sample)
+
+ return ControlNetOutput(
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
+ )
+
+ @classmethod
+ def from_pretrained_2d(cls, pretrained_model_path, control_path=None):
+ config_file = os.path.join(pretrained_model_path, 'config.json')
+ if not os.path.isfile(config_file):
+ raise RuntimeError(f"{config_file} does not exist")
+ with open(config_file, "r") as f:
+ config = json.load(f)
+ config["_class_name"] = cls.__name__
+ config["down_block_types"] = [
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "DownBlock3D"
+ ]
+
+ from diffusers.utils import WEIGHTS_NAME
+ model = cls.from_config(config)
+ if control_path is None:
+ model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
+ state_dict = torch.load(model_file, map_location="cpu")
+ else:
+ model_file = control_path
+ state_dict = torch.load(model_file, map_location="cpu")
+ state_dict = {k[14:]: state_dict[k] for k in state_dict.keys()}
+
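+        # Parameters that exist only in the 3D model (temporal layers, matched by
+        # the '_temp.' substring) keep their fresh initialization, since the 2D
+        # checkpoint has no weights for them.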
+ for k, v in model.state_dict().items():
+ if '_temp.' in k:
+ state_dict.update({k: v})
+ model.load_state_dict(state_dict)
+
+ return model
+
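+# Zero-initialize every parameter of a module. Used for the ControlNet projection
+# convs above so the control branch starts as a no-op and its influence is learned.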
+def zero_module(module):
+ for p in module.parameters():
+ nn.init.zeros_(p)
+ return module
diff --git a/ControlVideo-master/models/controlnet_attention.py b/ControlVideo-master/models/controlnet_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..e45cde9a508b3b81c4359b3220aedf4d26edb3c5
--- /dev/null
+++ b/ControlVideo-master/models/controlnet_attention.py
@@ -0,0 +1,483 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py
+
+from dataclasses import dataclass
+from typing import Optional, Callable
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers import ModelMixin
+from diffusers.utils import BaseOutput
+from diffusers.utils.import_utils import is_xformers_available
+from diffusers.models.attention import CrossAttention, FeedForward, AdaLayerNorm
+from einops import rearrange, repeat
+
+
+@dataclass
+class Transformer3DModelOutput(BaseOutput):
+ sample: torch.FloatTensor
+
+
+if is_xformers_available():
+ import xformers
+ import xformers.ops
+else:
+ xformers = None
+
+
+class Transformer3DModel(ModelMixin, ConfigMixin):
+ @register_to_config
+ def __init__(
+ self,
+ num_attention_heads: int = 16,
+ attention_head_dim: int = 88,
+ in_channels: Optional[int] = None,
+ num_layers: int = 1,
+ dropout: float = 0.0,
+ norm_num_groups: int = 32,
+ cross_attention_dim: Optional[int] = None,
+ attention_bias: bool = False,
+ activation_fn: str = "geglu",
+ num_embeds_ada_norm: Optional[int] = None,
+ use_linear_projection: bool = False,
+ only_cross_attention: bool = False,
+ upcast_attention: bool = False,
+ ):
+ super().__init__()
+ self.use_linear_projection = use_linear_projection
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_dim = attention_head_dim
+ inner_dim = num_attention_heads * attention_head_dim
+
+ # Define input layers
+ self.in_channels = in_channels
+
+ self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
+ if use_linear_projection:
+ self.proj_in = nn.Linear(in_channels, inner_dim)
+ else:
+ self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
+
+        # Define transformer blocks
+ self.transformer_blocks = nn.ModuleList(
+ [
+ BasicTransformerBlock(
+ inner_dim,
+ num_attention_heads,
+ attention_head_dim,
+ dropout=dropout,
+ cross_attention_dim=cross_attention_dim,
+ activation_fn=activation_fn,
+ num_embeds_ada_norm=num_embeds_ada_norm,
+ attention_bias=attention_bias,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ for d in range(num_layers)
+ ]
+ )
+
+        # Define output layers
+ if use_linear_projection:
+            self.proj_out = nn.Linear(inner_dim, in_channels)  # project back from inner_dim to in_channels
+ else:
+ self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
+
+ def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
+ # Input
+ assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
+ video_length = hidden_states.shape[2]
+ hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
+ encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
+
+        batch, channel, height, width = hidden_states.shape
+ residual = hidden_states
+
+ hidden_states = self.norm(hidden_states)
+ if not self.use_linear_projection:
+ hidden_states = self.proj_in(hidden_states)
+ inner_dim = hidden_states.shape[1]
+            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
+        else:
+            inner_dim = hidden_states.shape[1]
+            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
+ hidden_states = self.proj_in(hidden_states)
+
+ # Blocks
+ for block in self.transformer_blocks:
+ hidden_states = block(
+ hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ timestep=timestep,
+ video_length=video_length
+ )
+
+ # Output
+ if not self.use_linear_projection:
+ hidden_states = (
+                hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
+ )
+ hidden_states = self.proj_out(hidden_states)
+ else:
+ hidden_states = self.proj_out(hidden_states)
+ hidden_states = (
+                hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
+ )
+
+ output = hidden_states + residual
+
+ output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
+ if not return_dict:
+ return (output,)
+
+ return Transformer3DModelOutput(sample=output)
+
+
+class BasicTransformerBlock(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ dropout=0.0,
+ cross_attention_dim: Optional[int] = None,
+ activation_fn: str = "geglu",
+ num_embeds_ada_norm: Optional[int] = None,
+ attention_bias: bool = False,
+ only_cross_attention: bool = False,
+ upcast_attention: bool = False,
+ ):
+ super().__init__()
+ self.only_cross_attention = only_cross_attention
+ self.use_ada_layer_norm = num_embeds_ada_norm is not None
+
+ # Individual-Attn
+ self.attn1 = IndividualAttention(
+ query_dim=dim,
+ heads=num_attention_heads,
+ dim_head=attention_head_dim,
+ dropout=dropout,
+ bias=attention_bias,
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
+ upcast_attention=upcast_attention,
+ )
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
+
+ # Cross-Attn
+ if cross_attention_dim is not None:
+ self.attn2 = CrossAttention(
+ query_dim=dim,
+ cross_attention_dim=cross_attention_dim,
+ heads=num_attention_heads,
+ dim_head=attention_head_dim,
+ dropout=dropout,
+ bias=attention_bias,
+ upcast_attention=upcast_attention,
+ )
+ else:
+ self.attn2 = None
+
+ if cross_attention_dim is not None:
+ self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
+ else:
+ self.norm2 = None
+
+ # Feed-forward
+ self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)
+ self.norm3 = nn.LayerNorm(dim)
+
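+        # norm_temp pairs with the temporal-attention branch that is commented out
+        # in forward(); it is unused while that branch stays disabled.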
+ self.norm_temp = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
+
+ def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None):
+        if not is_xformers_available():
+            raise ModuleNotFoundError(
+                "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
+                " xformers",
+                name="xformers",
+            )
+        elif not torch.cuda.is_available():
+            raise ValueError(
+                "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
+                " only available for GPU."
+            )
+ else:
+ try:
+ # Make sure we can run the memory efficient attention
+ _ = xformers.ops.memory_efficient_attention(
+ torch.randn((1, 2, 40), device="cuda"),
+ torch.randn((1, 2, 40), device="cuda"),
+ torch.randn((1, 2, 40), device="cuda"),
+ )
+            except Exception:
+                # let any failure from the test op propagate unchanged
+                raise
+ self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+ if self.attn2 is not None:
+ self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+ # self.attn_temp._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+
+ def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None, video_length=None):
+ # Individual-Attention
+ norm_hidden_states = (
+ self.norm1(hidden_states, timestep) if self.use_ada_layer_norm else self.norm1(hidden_states)
+ )
+
+ if self.only_cross_attention:
+ hidden_states = (
+ self.attn1(norm_hidden_states, encoder_hidden_states, attention_mask=attention_mask) + hidden_states
+ )
+ else:
+ hidden_states = self.attn1(norm_hidden_states, attention_mask=attention_mask, video_length=video_length) + hidden_states
+
+ if self.attn2 is not None:
+ # Cross-Attention
+ norm_hidden_states = (
+ self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
+ )
+ hidden_states = (
+ self.attn2(
+ norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask
+ )
+ + hidden_states
+ )
+
+ # Feed-forward
+ hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
+
+ # # Temporal-Attention
+ # d = hidden_states.shape[1]
+ # hidden_states = rearrange(hidden_states, "(b f) d c -> (b d) f c", f=video_length)
+ # norm_hidden_states = (
+ # self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)
+ # )
+ # hidden_states = self.attn_temp(norm_hidden_states) + hidden_states
+ # hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d)
+
+ return hidden_states
+
+
+class IndividualAttention(nn.Module):
+ r"""
+    A per-frame attention layer: each frame's tokens attend only within that same frame.
+
+ Parameters:
+ query_dim (`int`): The number of channels in the query.
+ cross_attention_dim (`int`, *optional*):
+ The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
+ heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
+ dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+ bias (`bool`, *optional*, defaults to False):
+ Set to `True` for the query, key, and value linear layers to contain a bias parameter.
+ """
+
+ def __init__(
+ self,
+ query_dim: int,
+ cross_attention_dim: Optional[int] = None,
+ heads: int = 8,
+ dim_head: int = 64,
+ dropout: float = 0.0,
+ bias=False,
+ upcast_attention: bool = False,
+ upcast_softmax: bool = False,
+ added_kv_proj_dim: Optional[int] = None,
+ norm_num_groups: Optional[int] = None,
+ ):
+ super().__init__()
+ inner_dim = dim_head * heads
+ cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
+ self.upcast_attention = upcast_attention
+ self.upcast_softmax = upcast_softmax
+
+ self.scale = dim_head**-0.5
+
+ self.heads = heads
+ # for slice_size > 0 the attention score computation
+ # is split across the batch axis to save memory
+ # You can set slice_size with `set_attention_slice`
+ self.sliceable_head_dim = heads
+ self._slice_size = None
+ self._use_memory_efficient_attention_xformers = False
+ self.added_kv_proj_dim = added_kv_proj_dim
+
+ if norm_num_groups is not None:
+ self.group_norm = nn.GroupNorm(num_channels=inner_dim, num_groups=norm_num_groups, eps=1e-5, affine=True)
+ else:
+ self.group_norm = None
+
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)
+ self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
+ self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
+
+ if self.added_kv_proj_dim is not None:
+ self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
+ self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
+
+ self.to_out = nn.ModuleList([])
+ self.to_out.append(nn.Linear(inner_dim, query_dim))
+ self.to_out.append(nn.Dropout(dropout))
+
+ def reshape_heads_to_batch_dim(self, tensor):
+ batch_size, seq_len, dim = tensor.shape
+ head_size = self.heads
+ tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
+ return tensor
+
+ def reshape_batch_dim_to_heads(self, tensor):
+ batch_size, seq_len, dim = tensor.shape
+ head_size = self.heads
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
+ return tensor
+
+ def set_attention_slice(self, slice_size):
+ if slice_size is not None and slice_size > self.sliceable_head_dim:
+ raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")
+
+ self._slice_size = slice_size
+
+ def _attention(self, query, key, value, attention_mask=None):
+ if self.upcast_attention:
+ query = query.float()
+ key = key.float()
+
+ attention_scores = torch.baddbmm(
+ torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
+ query,
+ key.transpose(-1, -2),
+ beta=0,
+ alpha=self.scale,
+ )
+
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ if self.upcast_softmax:
+ attention_scores = attention_scores.float()
+
+ attention_probs = attention_scores.softmax(dim=-1)
+
+ # cast back to the original dtype
+ attention_probs = attention_probs.to(value.dtype)
+
+ # compute attention output
+ hidden_states = torch.bmm(attention_probs, value)
+
+ # reshape hidden_states
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def _sliced_attention(self, query, key, value, sequence_length, dim, attention_mask):
+ batch_size_attention = query.shape[0]
+ hidden_states = torch.zeros(
+ (batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
+ )
+ slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
+ for i in range(hidden_states.shape[0] // slice_size):
+ start_idx = i * slice_size
+ end_idx = (i + 1) * slice_size
+
+ query_slice = query[start_idx:end_idx]
+ key_slice = key[start_idx:end_idx]
+
+ if self.upcast_attention:
+ query_slice = query_slice.float()
+ key_slice = key_slice.float()
+
+ attn_slice = torch.baddbmm(
+ torch.empty(slice_size, query.shape[1], key.shape[1], dtype=query_slice.dtype, device=query.device),
+ query_slice,
+ key_slice.transpose(-1, -2),
+ beta=0,
+ alpha=self.scale,
+ )
+
+ if attention_mask is not None:
+ attn_slice = attn_slice + attention_mask[start_idx:end_idx]
+
+ if self.upcast_softmax:
+ attn_slice = attn_slice.float()
+
+ attn_slice = attn_slice.softmax(dim=-1)
+
+ # cast back to the original dtype
+ attn_slice = attn_slice.to(value.dtype)
+ attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])
+
+ hidden_states[start_idx:end_idx] = attn_slice
+
+ # reshape hidden_states
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
+ # TODO attention_mask
+ query = query.contiguous()
+ key = key.contiguous()
+ value = value.contiguous()
+ hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
+ return hidden_states
+
+ def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
+ batch_size, sequence_length, _ = hidden_states.shape
+
+ if self.group_norm is not None:
+ hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = self.to_q(hidden_states) # (bf) x d(hw) x c
+ dim = query.shape[-1]
+
+ query = self.reshape_heads_to_batch_dim(query)
+
+ if self.added_kv_proj_dim is not None:
+ raise NotImplementedError
+
+ encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
+ key = self.to_k(encoder_hidden_states)
+ value = self.to_v(encoder_hidden_states)
+
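+        # Per-frame ("individual") attention: indexing keys/values with
+        # arange(video_length) keeps each frame's own tokens, so every frame
+        # attends only within itself. The rearranges are effectively an identity
+        # pass, kept to make the frame selection explicit.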
+ curr_frame_index = torch.arange(video_length)
+
+ key = rearrange(key, "(b f) d c -> b f d c", f=video_length)
+
+ key = key[:, curr_frame_index]
+ key = rearrange(key, "b f d c -> (b f) d c")
+
+ value = rearrange(value, "(b f) d c -> b f d c", f=video_length)
+
+ value = value[:, curr_frame_index]
+ value = rearrange(value, "b f d c -> (b f) d c")
+
+ key = self.reshape_heads_to_batch_dim(key)
+ value = self.reshape_heads_to_batch_dim(value)
+
+ if attention_mask is not None:
+ if attention_mask.shape[-1] != query.shape[1]:
+ target_length = query.shape[1]
+                attention_mask = F.pad(attention_mask, (0, target_length - attention_mask.shape[-1]), value=0.0)
+ attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
+
+ # attention, what we cannot get enough of
+ if self._use_memory_efficient_attention_xformers:
+ hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
+ # Some versions of xformers return output in fp32, cast it back to the dtype of the input
+ hidden_states = hidden_states.to(query.dtype)
+ else:
+ if self._slice_size is None or query.shape[0] // self._slice_size == 1:
+ hidden_states = self._attention(query, key, value, attention_mask)
+ else:
+ hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)
+
+ # linear proj
+ hidden_states = self.to_out[0](hidden_states)
+
+ # dropout
+ hidden_states = self.to_out[1](hidden_states)
+ return hidden_states
diff --git a/ControlVideo-master/models/controlnet_unet_blocks.py b/ControlVideo-master/models/controlnet_unet_blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..75a3bfb5d7994a682fe8896180dd614910a69a07
--- /dev/null
+++ b/ControlVideo-master/models/controlnet_unet_blocks.py
@@ -0,0 +1,589 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
+
+import torch
+from torch import nn
+
+from .controlnet_attention import Transformer3DModel
+from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
+
+
+def get_down_block(
+ down_block_type,
+ num_layers,
+ in_channels,
+ out_channels,
+ temb_channels,
+ add_downsample,
+ resnet_eps,
+ resnet_act_fn,
+ attn_num_head_channels,
+ resnet_groups=None,
+ cross_attention_dim=None,
+ downsample_padding=None,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ resnet_time_scale_shift="default",
+):
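+    # Accept both the plain block name and the "UNetRes"-prefixed spelling used
+    # by some configs by stripping the 7-character prefix.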
+ down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
+ if down_block_type == "DownBlock3D":
+ return DownBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ add_downsample=add_downsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ downsample_padding=downsample_padding,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ elif down_block_type == "CrossAttnDownBlock3D":
+ if cross_attention_dim is None:
+ raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
+ return CrossAttnDownBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ add_downsample=add_downsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ downsample_padding=downsample_padding,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attn_num_head_channels,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ raise ValueError(f"{down_block_type} does not exist.")
+
+
+def get_up_block(
+ up_block_type,
+ num_layers,
+ in_channels,
+ out_channels,
+ prev_output_channel,
+ temb_channels,
+ add_upsample,
+ resnet_eps,
+ resnet_act_fn,
+ attn_num_head_channels,
+ resnet_groups=None,
+ cross_attention_dim=None,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ resnet_time_scale_shift="default",
+):
+ up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
+ if up_block_type == "UpBlock3D":
+ return UpBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ prev_output_channel=prev_output_channel,
+ temb_channels=temb_channels,
+ add_upsample=add_upsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ elif up_block_type == "CrossAttnUpBlock3D":
+ if cross_attention_dim is None:
+ raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
+ return CrossAttnUpBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ prev_output_channel=prev_output_channel,
+ temb_channels=temb_channels,
+ add_upsample=add_upsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attn_num_head_channels,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ raise ValueError(f"{up_block_type} does not exist.")
+
+
+class UNetMidBlock3DCrossAttn(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ output_scale_factor=1.0,
+ cross_attention_dim=1280,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
+
+ # there is always at least one resnet
+ resnets = [
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ ]
+ attentions = []
+
+ for _ in range(num_layers):
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ in_channels // attn_num_head_channels,
+ in_channels=in_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ upcast_attention=upcast_attention,
+ )
+ )
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None):
+ hidden_states = self.resnets[0](hidden_states, temb)
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
+ hidden_states = resnet(hidden_states, temb)
+
+ return hidden_states
+
+
+class CrossAttnDownBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ cross_attention_dim=1280,
+ output_scale_factor=1.0,
+ downsample_padding=1,
+ add_downsample=True,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+ resnets = []
+ attentions = []
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+
+ for i in range(num_layers):
+ in_channels = in_channels if i == 0 else out_channels
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ out_channels // attn_num_head_channels,
+ in_channels=out_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ )
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_downsample:
+ self.downsamplers = nn.ModuleList(
+ [
+ Downsample3D(
+ out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
+ )
+ ]
+ )
+ else:
+ self.downsamplers = None
+
+ self.gradient_checkpointing = False
+
+    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None):
+ output_states = ()
+
+ for resnet, attn in zip(self.resnets, self.attentions):
+ if self.training and self.gradient_checkpointing:
+
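+                # torch.utils.checkpoint.checkpoint needs a plain callable; this closure also
+                # pins `return_dict` so the attention block returns a tuple when checkpointed.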
+ def create_custom_forward(module, return_dict=None):
+ def custom_forward(*inputs):
+ if return_dict is not None:
+ return module(*inputs, return_dict=return_dict)
+ else:
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(attn, return_dict=False),
+ hidden_states,
+ encoder_hidden_states,
+ )[0]
+ else:
+ hidden_states = resnet(hidden_states, temb)
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
+
+ output_states += (hidden_states,)
+
+ if self.downsamplers is not None:
+ for downsampler in self.downsamplers:
+ hidden_states = downsampler(hidden_states)
+
+ output_states += (hidden_states,)
+
+ return hidden_states, output_states
+
+
+class DownBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ output_scale_factor=1.0,
+ add_downsample=True,
+ downsample_padding=1,
+ ):
+ super().__init__()
+ resnets = []
+
+ for i in range(num_layers):
+ in_channels = in_channels if i == 0 else out_channels
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_downsample:
+ self.downsamplers = nn.ModuleList(
+ [
+ Downsample3D(
+ out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
+ )
+ ]
+ )
+ else:
+ self.downsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(self, hidden_states, temb=None):
+ output_states = ()
+
+ for resnet in self.resnets:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ else:
+ hidden_states = resnet(hidden_states, temb)
+
+ output_states += (hidden_states,)
+
+ if self.downsamplers is not None:
+ for downsampler in self.downsamplers:
+ hidden_states = downsampler(hidden_states)
+
+ output_states += (hidden_states,)
+
+ return hidden_states, output_states
+
+
+class CrossAttnUpBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ prev_output_channel: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ cross_attention_dim=1280,
+ output_scale_factor=1.0,
+ add_upsample=True,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+ resnets = []
+ attentions = []
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+
+ for i in range(num_layers):
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
+
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=resnet_in_channels + res_skip_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ out_channels // attn_num_head_channels,
+ in_channels=out_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ )
+
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_upsample:
+ self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
+ else:
+ self.upsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ res_hidden_states_tuple,
+ temb=None,
+ encoder_hidden_states=None,
+ upsample_size=None,
+ attention_mask=None,
+ cross_attention_kwargs=None
+ ):
+ for resnet, attn in zip(self.resnets, self.attentions):
+ # pop res hidden states
+ res_hidden_states = res_hidden_states_tuple[-1]
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module, return_dict=None):
+ def custom_forward(*inputs):
+ if return_dict is not None:
+ return module(*inputs, return_dict=return_dict)
+ else:
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(attn, return_dict=False),
+ hidden_states,
+ encoder_hidden_states,
+ )[0]
+ else:
+ hidden_states = resnet(hidden_states, temb)
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
+
+ if self.upsamplers is not None:
+ for upsampler in self.upsamplers:
+ hidden_states = upsampler(hidden_states, upsample_size)
+
+ return hidden_states
+
+
+class UpBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ prev_output_channel: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ output_scale_factor=1.0,
+ add_upsample=True,
+ ):
+ super().__init__()
+ resnets = []
+
+ for i in range(num_layers):
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
+
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=resnet_in_channels + res_skip_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_upsample:
+ self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
+ else:
+ self.upsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
+ for resnet in self.resnets:
+ # pop res hidden states
+ res_hidden_states = res_hidden_states_tuple[-1]
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ else:
+ hidden_states = resnet(hidden_states, temb)
+
+ if self.upsamplers is not None:
+ for upsampler in self.upsamplers:
+ hidden_states = upsampler(hidden_states, upsample_size)
+
+ return hidden_states
diff --git a/ControlVideo-master/models/pipeline_controlvideo.py b/ControlVideo-master/models/pipeline_controlvideo.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aa4a17024c6c4e9a3220e3fe82d5f7054ae0486
--- /dev/null
+++ b/ControlVideo-master/models/pipeline_controlvideo.py
@@ -0,0 +1,1351 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import inspect
+import os
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from dataclasses import dataclass
+
+import numpy as np
+import PIL.Image
+import torch
+from torch import nn
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from diffusers.models import AutoencoderKL
+from .controlnet import ControlNetOutput
+from diffusers import ModelMixin
+from diffusers.schedulers import DDIMScheduler
+from diffusers.utils import (
+ PIL_INTERPOLATION,
+ is_accelerate_available,
+ is_accelerate_version,
+ logging,
+ randn_tensor,
+ BaseOutput
+)
+from diffusers.pipeline_utils import DiffusionPipeline
+
+from einops import rearrange
+
+from .unet import UNet3DConditionModel
+from .controlnet import ControlNetModel3D
+from .RIFE.IFNet_HDv3 import IFNet
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class ControlVideoPipelineOutput(BaseOutput):
+ videos: Union[torch.Tensor, np.ndarray]
+
+
+class MultiControlNetModel3D(ModelMixin):
+ r"""
+ Multiple `ControlNetModel` wrapper class for Multi-ControlNet
+
+ This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
+ compatible with `ControlNetModel`.
+
+ Args:
+        controlnets (`List[ControlNetModel3D]`):
+            Provides additional conditioning to the unet during the denoising process. You must pass multiple
+            `ControlNetModel3D` instances as a list.
+ """
+
+ def __init__(self, controlnets: Union[List[ControlNetModel3D], Tuple[ControlNetModel3D]]):
+ super().__init__()
+ self.nets = nn.ModuleList(controlnets)
+
+ def forward(
+ self,
+ sample: torch.FloatTensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: torch.Tensor,
+        controlnet_cond: List[List[torch.Tensor]],
+ conditioning_scale: List[float],
+ class_labels: Optional[torch.Tensor] = None,
+ timestep_cond: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ return_dict: bool = True,
+ ) -> Union[ControlNetOutput, Tuple]:
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
+ down_samples, mid_sample = controlnet(
+ sample,
+ timestep,
+ encoder_hidden_states,
+ torch.cat(image, dim=0),
+ scale,
+ class_labels,
+ timestep_cond,
+ attention_mask,
+ cross_attention_kwargs,
+ return_dict,
+ )
+
+ # merge samples
+ if i == 0:
+ down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
+ else:
+ down_block_res_samples = [
+ samples_prev + samples_curr
+ for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
+ ]
+ mid_block_res_sample += mid_sample
+
+ return down_block_res_samples, mid_block_res_sample
+
+
+class ControlVideoPipeline(DiffusionPipeline):
+ r"""
+ Pipeline for text-to-video generation using Stable Diffusion with ControlNet guidance.
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+ text_encoder ([`CLIPTextModel`]):
+ Frozen text-encoder. Stable Diffusion uses the text portion of
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+ tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        unet ([`UNet3DConditionModel`]): Conditional 3D U-Net architecture to denoise the encoded video latents.
+        controlnet ([`ControlNetModel3D`] or `List[ControlNetModel3D]`):
+            Provides additional conditioning to the unet during the denoising process. If you set multiple
+            ControlNets as a list, the outputs from each ControlNet are added together to create one combined
+            additional conditioning.
+        scheduler ([`DDIMScheduler`]):
+            A scheduler to be used in combination with `unet` to denoise the encoded video latents. The smoothing
+            step relies on DDIM-specific attributes (`alphas_cumprod` and `final_alpha_cumprod`), so
+            [`DDIMScheduler`] is expected here.
+        interpolater ([`IFNet`]):
+            RIFE frame-interpolation network used to smooth the predicted RGB frames at selected denoising steps.
+ safety_checker ([`StableDiffusionSafetyChecker`]):
+ Classification module that estimates whether generated images could be considered offensive or harmful.
+            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+ feature_extractor ([`CLIPImageProcessor`]):
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+ """
+ _optional_components = ["safety_checker", "feature_extractor"]
+
+ def __init__(
+ self,
+ vae: AutoencoderKL,
+ text_encoder: CLIPTextModel,
+ tokenizer: CLIPTokenizer,
+ unet: UNet3DConditionModel,
+ controlnet: Union[ControlNetModel3D, List[ControlNetModel3D], Tuple[ControlNetModel3D], MultiControlNetModel3D],
+ scheduler: DDIMScheduler,
+ interpolater: IFNet,
+ ):
+ super().__init__()
+
+ if isinstance(controlnet, (list, tuple)):
+ controlnet = MultiControlNetModel3D(controlnet)
+
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ controlnet=controlnet,
+ scheduler=scheduler,
+ interpolater=interpolater,
+ )
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)  # 8 for the standard SD VAE
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
+ def enable_vae_slicing(self):
+ r"""
+ Enable sliced VAE decoding.
+
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
+ steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.vae.enable_slicing()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
+ def disable_vae_slicing(self):
+ r"""
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_slicing()
+
+ def enable_sequential_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
+ text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
+        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
+ `enable_model_cpu_offload`, but performance is lower.
+ """
+ if is_accelerate_available():
+ from accelerate import cpu_offload
+ else:
+ raise ImportError("Please install accelerate via `pip install accelerate`")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
+ cpu_offload(cpu_offloaded_model, device)
+
+        if getattr(self, "safety_checker", None) is not None:
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
+
+ def enable_model_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
+ from accelerate import cpu_offload_with_hook
+ else:
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ hook = None
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+
+        if getattr(self, "safety_checker", None) is not None:
+ # the safety checker can offload the vae again
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
+
+        # the ControlNet hook has to be manually offloaded, as it alternates with the unet
+ cpu_offload_with_hook(self.controlnet, device)
+
+ # We'll offload the last model manually.
+ self.final_offload_hook = hook
+
+ @property
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
+ def _execution_device(self):
+ r"""
+ Returns the device on which the pipeline's models will be executed. After calling
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+ hooks.
+ """
+ if not hasattr(self.unet, "_hf_hook"):
+ return self.device
+ for module in self.unet.modules():
+ if (
+ hasattr(module, "_hf_hook")
+ and hasattr(module._hf_hook, "execution_device")
+ and module._hf_hook.execution_device is not None
+ ):
+ return torch.device(module._hf_hook.execution_device)
+ return self.device
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
+ def _encode_prompt(
+ self,
+ prompt,
+ device,
+ num_videos_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ ):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+            device (`torch.device`):
+ torch device
+            num_videos_per_prompt (`int`):
+                number of videos that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead.
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ """
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if prompt_embeds is None:
+ text_inputs = self.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=self.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+ text_input_ids, untruncated_ids
+ ):
+ removed_text = self.tokenizer.batch_decode(
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
+ )
+ logger.warning(
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = text_inputs.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ prompt_embeds = self.text_encoder(
+ text_input_ids.to(device),
+ attention_mask=attention_mask,
+ )
+ prompt_embeds = prompt_embeds[0]
+
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1)
+
+ # get unconditional embeddings for classifier free guidance
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ uncond_tokens: List[str]
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt]
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+ max_length = prompt_embeds.shape[1]
+ uncond_input = self.tokenizer(
+ uncond_tokens,
+ padding="max_length",
+ max_length=max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = uncond_input.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ negative_prompt_embeds = self.text_encoder(
+ uncond_input.input_ids.to(device),
+ attention_mask=attention_mask,
+ )
+ negative_prompt_embeds = negative_prompt_embeds[0]
+
+ if do_classifier_free_guidance:
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+ seq_len = negative_prompt_embeds.shape[1]
+
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
+
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ return prompt_embeds
+
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+ def decode_latents(self, latents, return_tensor=False):
+ video_length = latents.shape[2]
+        latents = 1 / 0.18215 * latents  # undo the SD VAE latent scaling factor (0.18215)
+ latents = rearrange(latents, "b c f h w -> (b f) c h w")
+ video = self.vae.decode(latents).sample
+ video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
+ video = (video / 2 + 0.5).clamp(0, 1)
+ if return_tensor:
+ return video
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ video = video.cpu().float().numpy()
+ return video
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+ def prepare_extra_step_kwargs(self, generator, eta):
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
+
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
+
+ # check if the scheduler accepts generator
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ if accepts_generator:
+ extra_step_kwargs["generator"] = generator
+ return extra_step_kwargs
+
+ def check_inputs(
+ self,
+ prompt,
+ # image,
+ height,
+ width,
+ callback_steps,
+ negative_prompt=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ controlnet_conditioning_scale=1.0,
+ ):
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if (callback_steps is None) or (
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+ ):
+ raise ValueError(
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+ f" {type(callback_steps)}."
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ # Check `image`
+
+ # if isinstance(self.controlnet, ControlNetModel):
+ # self.check_image(image, prompt, prompt_embeds)
+ # elif isinstance(self.controlnet, MultiControlNetModel):
+ # if not isinstance(image, list):
+ # raise TypeError("For multiple controlnets: `image` must be type `list`")
+
+ # if len(image) != len(self.controlnet.nets):
+ # raise ValueError(
+ # "For multiple controlnets: `image` must have the same length as the number of controlnets."
+ # )
+
+ # for image_ in image:
+ # self.check_image(image_, prompt, prompt_embeds)
+ # else:
+ # assert False
+
+ # Check `controlnet_conditioning_scale`
+
+ if isinstance(self.controlnet, ControlNetModel3D):
+ if not isinstance(controlnet_conditioning_scale, float):
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
+ elif isinstance(self.controlnet, MultiControlNetModel3D):
+ if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
+ self.controlnet.nets
+ ):
+ raise ValueError(
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
+ " the same length as the number of controlnets"
+ )
+ else:
+ assert False
+
+ def check_image(self, image, prompt, prompt_embeds):
+ image_is_pil = isinstance(image, PIL.Image.Image)
+ image_is_tensor = isinstance(image, torch.Tensor)
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
+
+ if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
+ raise TypeError(
+ "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
+ )
+
+ if image_is_pil:
+ image_batch_size = 1
+ elif image_is_tensor:
+ image_batch_size = image.shape[0]
+ elif image_is_pil_list:
+ image_batch_size = len(image)
+ elif image_is_tensor_list:
+ image_batch_size = len(image)
+
+ if prompt is not None and isinstance(prompt, str):
+ prompt_batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ prompt_batch_size = len(prompt)
+ elif prompt_embeds is not None:
+ prompt_batch_size = prompt_embeds.shape[0]
+
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
+ raise ValueError(
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
+ )
+
+ def prepare_image(
+ self, image, width, height, batch_size, num_videos_per_prompt, device, dtype, do_classifier_free_guidance
+ ):
+ if not isinstance(image, torch.Tensor):
+ if isinstance(image, PIL.Image.Image):
+ image = [image]
+
+ if isinstance(image[0], PIL.Image.Image):
+ images = []
+
+ for image_ in image:
+ image_ = image_.convert("RGB")
+ image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
+ image_ = np.array(image_)
+ image_ = image_[None, :]
+ images.append(image_)
+
+ image = images
+
+ image = np.concatenate(image, axis=0)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = image.transpose(0, 3, 1, 2)
+ image = torch.from_numpy(image)
+ elif isinstance(image[0], torch.Tensor):
+ image = torch.cat(image, dim=0)
+
+ image_batch_size = image.shape[0]
+
+ if image_batch_size == 1:
+ repeat_by = batch_size
+ else:
+ # image batch size is the same as prompt batch size
+ repeat_by = num_videos_per_prompt
+
+ image = image.repeat_interleave(repeat_by, dim=0)
+
+ image = image.to(device=device, dtype=dtype)
+
+ if do_classifier_free_guidance:
+ image = torch.cat([image] * 2)
+
+ return image
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+    def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype,
+                        device, generator, latents=None, same_frame_noise=True):
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if latents is None:
+ if same_frame_noise:
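+                # Draw a single frame of noise and repeat it along the frame axis, so every
+                # frame starts denoising from the same latent noise (better temporal consistency).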
+ shape = (batch_size, num_channels_latents, 1, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ latents = latents.repeat(1, 1, video_length, 1, 1)
+ else:
+ shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ else:
+ shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ if latents.shape != shape:
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+ latents = latents.to(device)
+
+ # scale the initial noise by the standard deviation required by the scheduler
+ latents = latents * self.scheduler.init_noise_sigma
+ return latents
+
+ def _default_height_width(self, height, width, image):
+        # NOTE: images in a list may have different dimensions, so just checking
+        # the first image is not _exactly_ correct, but it is simple.
+ while isinstance(image, list):
+ image = image[0]
+
+ if height is None:
+ if isinstance(image, PIL.Image.Image):
+ height = image.height
+ elif isinstance(image, torch.Tensor):
+ height = image.shape[3]
+
+ height = (height // 8) * 8 # round down to nearest multiple of 8
+
+ if width is None:
+ if isinstance(image, PIL.Image.Image):
+ width = image.width
+ elif isinstance(image, torch.Tensor):
+ width = image.shape[2]
+
+ width = (width // 8) * 8 # round down to nearest multiple of 8
+
+ return height, width
+
+ # override DiffusionPipeline
+ def save_pretrained(
+ self,
+ save_directory: Union[str, os.PathLike],
+ safe_serialization: bool = False,
+ variant: Optional[str] = None,
+ ):
+ if isinstance(self.controlnet, ControlNetModel3D):
+ super().save_pretrained(save_directory, safe_serialization, variant)
+ else:
+            raise NotImplementedError("Currently, `save_pretrained()` is not implemented for Multi-ControlNet.")
+
+    def get_alpha_prev(self, timestep):
+        # DDIM: look up alpha_bar at the previous timestep on the inference schedule
+        # (train/inference step ratio apart); fall back to final_alpha_cumprod past t=0.
+        prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
+        alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
+        return alpha_prod_t_prev
+
+ def get_slide_window_indices(self, video_length, window_size):
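+        # Worked example: video_length=15, window_size=8 yields
+        #   key_frame_indices = [0, 7, 14]
+        #   inter_frame_list  = [[1, 2, 3, 4, 5, 6], [8, 9, 10, 11, 12, 13]]
+        # (consecutive windows share their boundary key frames).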
+        assert window_size >= 3
+        key_frame_indices = np.arange(0, video_length, window_size - 1).tolist()
+
+        # Append last index
+        if key_frame_indices[-1] != (video_length - 1):
+            key_frame_indices.append(video_length - 1)
+
+ slices = np.split(np.arange(video_length), key_frame_indices)
+ inter_frame_list = []
+ for s in slices:
+ if len(s) < 2:
+ continue
+ inter_frame_list.append(s[1:].tolist())
+ return key_frame_indices, inter_frame_list
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ video_length: Optional[int] = 1,
+ frames: Union[List[torch.FloatTensor], List[PIL.Image.Image], List[List[torch.FloatTensor]], List[List[PIL.Image.Image]]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_videos_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "tensor",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+        smooth_steps: List[int] = [19, 20],
+ **kwargs,
+ ):
+ r"""
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+ frames (`List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
+ `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
+                The ControlVideo input condition. ControlVideo uses this input condition to generate guidance for the
+                UNet. If the type is specified as `torch.FloatTensor`, it is passed to the ControlNet as is.
+                `PIL.Image.Image` can also be accepted as an image. The dimensions of the output video default to the
+                dimensions of the condition frames. If height and/or width are passed, the frames are resized
+                accordingly. If multiple ControlNets are specified in init, the frames must be passed as a list such
+                that each element of the list can be correctly batched for input to a single ControlNet.
+            height (`int`, *optional*, defaults to the height of the condition frames):
+                The height in pixels of the generated video.
+            width (`int`, *optional*, defaults to the width of the condition frames):
+                The width in pixels of the generated video.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead.
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+            output_type (`str`, *optional*, defaults to `"tensor"`):
+                The output format of the generated video. Choose `"tensor"` for a `torch.Tensor`; any other value
+                returns a `np.ndarray`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
+ corresponding scale as a list.
+ smooth_steps (`List[int]`):
+                Apply the RIFE smoother to the predicted RGB frames at these denoising steps.
+
+ Examples:
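+
+        A minimal usage sketch (illustrative only; `pipe`, an assembled `ControlVideoPipeline`, and
+        the pose-frame filenames are assumptions, not part of this file):
+
+        ```py
+        >>> frames = [PIL.Image.open(f"pose_{i:02d}.png") for i in range(15)]
+        >>> video = pipe(
+        ...     "a robot dancing in the desert, best quality",
+        ...     video_length=len(frames),
+        ...     frames=frames,
+        ...     num_inference_steps=50,
+        ...     guidance_scale=7.5,
+        ... ).videos
+        ```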
+
+ Returns:
+ [`ControlVideoPipelineOutput`] or `tuple`:
+            [`ControlVideoPipelineOutput`] if `return_dict` is True, otherwise the generated video
+            (`torch.Tensor` or `np.ndarray`, depending on `output_type`).
+ """
+ # 0. Default height and width to unet
+ height, width = self._default_height_width(height, width, frames)
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ height,
+ width,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ controlnet_conditioning_scale,
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ if isinstance(self.controlnet, MultiControlNetModel3D) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
+
+ # 3. Encode input prompt
+ prompt_embeds = self._encode_prompt(
+ prompt,
+ device,
+ num_videos_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ )
+
+ # 4. Prepare image
+ if isinstance(self.controlnet, ControlNetModel3D):
+ images = []
+ for i_img in frames:
+ i_img = self.prepare_image(
+ image=i_img,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_videos_per_prompt,
+ num_videos_per_prompt=num_videos_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+ images.append(i_img)
+ frames = torch.stack(images, dim=2) # b x c x f x h x w
+ elif isinstance(self.controlnet, MultiControlNetModel3D):
+ images = []
+ for i_img in frames:
+ i_images = []
+ for ii_img in i_img:
+ ii_img = self.prepare_image(
+ image=ii_img,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_videos_per_prompt,
+ num_videos_per_prompt=num_videos_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+
+ i_images.append(ii_img)
+ images.append(torch.stack(i_images, dim=2))
+ frames = images
+ else:
+ assert False
+
+ # 5. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.unet.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_videos_per_prompt,
+ num_channels_latents,
+ video_length,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ same_frame_noise=True,
+ )
+
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+
+ # Prepare video indices if performing smoothing
+ if len(smooth_steps) > 0:
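+            # Even/odd frame partitions for the RIFE smoother: at alternating smoothing steps,
+            # odd-indexed frames are re-predicted by interpolating between even-indexed frames,
+            # and vice versa (see the smoothing branch in the denoising loop below).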
+ video_indices = np.arange(video_length)
+ zero_indices = video_indices[0::2]
+ one_indices = video_indices[1::2]
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ torch.cuda.empty_cache()
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # controlnet(s) inference
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ controlnet_cond=frames,
+ conditioning_scale=controlnet_conditioning_scale,
+ return_dict=False,
+ )
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ down_block_additional_residuals=down_block_res_samples,
+ mid_block_additional_residual=mid_block_res_sample,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ step_dict = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)
+ latents = step_dict.prev_sample
+ pred_original_sample = step_dict.pred_original_sample
+
+ # Smooth videos
+ if (num_inference_steps - i) in smooth_steps:
+ pred_video = self.decode_latents(pred_original_sample, return_tensor=True) # b c f h w
+ pred_video = rearrange(pred_video, "b c f h w -> b f c h w")
+ for b_i in range(len(pred_video)):
+ if i % 2 == 0:
+ for v_i in range(len(zero_indices)-1):
+ s_frame = pred_video[b_i][zero_indices[v_i]].unsqueeze(0)
+ e_frame = pred_video[b_i][zero_indices[v_i+1]].unsqueeze(0)
+ pred_video[b_i][one_indices[v_i]] = self.interpolater.inference(s_frame, e_frame)[0]
+ else:
+ if video_length % 2 == 1:
+ tmp_one_indices = [0] + one_indices.tolist() + [video_length-1]
+ else:
+ tmp_one_indices = [0] + one_indices.tolist()
+
+ for v_i in range(len(tmp_one_indices)-1):
+ s_frame = pred_video[b_i][tmp_one_indices[v_i]].unsqueeze(0)
+ e_frame = pred_video[b_i][tmp_one_indices[v_i+1]].unsqueeze(0)
+ pred_video[b_i][zero_indices[v_i]] = self.interpolater.inference(s_frame, e_frame)[0]
+ pred_video = rearrange(pred_video, "b f c h w -> (b f) c h w")
+                    pred_video = 2.0 * pred_video - 1.0  # map [0, 1] back to [-1, 1] for the VAE encoder
+ # ori_pred_original_sample = pred_original_sample
+ pred_original_sample = self.vae.encode(pred_video).latent_dist.sample(generator)
+ pred_original_sample *= self.vae.config.scaling_factor
+ pred_original_sample = rearrange(pred_original_sample, "(b f) c h w -> b c f h w", f=video_length)
+
+ # predict xt-1 with smoothed x0
+                    alpha_prod_t_prev = self.get_alpha_prev(t)
+ # preserve more details
+ # pred_original_sample = ori_pred_original_sample * alpha_prod_t_prev + (1 - alpha_prod_t_prev) * pred_original_sample
+ # compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
+ pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * noise_pred
+ # compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
+ latents = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
+
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+ # Post-processing
+ video = self.decode_latents(latents)
+
+ # Convert to tensor
+ if output_type == "tensor":
+ video = torch.from_numpy(video)
+
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
+ if not return_dict:
+ return video
+
+ return ControlVideoPipelineOutput(videos=video)
+
+ @torch.no_grad()
+ def generate_long_video(
+ self,
+ prompt: Union[str, List[str]] = None,
+ video_length: Optional[int] = 1,
+ frames: Union[List[torch.FloatTensor], List[PIL.Image.Image], List[List[torch.FloatTensor]], List[List[PIL.Image.Image]]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_videos_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "tensor",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+        smooth_steps: List[int] = [19, 20],
+ window_size: int = 8,
+ **kwargs,
+ ):
+ r"""
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+ frames (`List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
+ `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):
+                The ControlVideo input condition. ControlVideo uses this input condition to generate guidance for the
+                UNet. If the type is specified as `torch.FloatTensor`, it is passed to the ControlNet as is.
+                `PIL.Image.Image` can also be accepted as an image. The dimensions of the output video default to the
+                dimensions of the condition frames. If height and/or width are passed, the frames are resized
+                accordingly. If multiple ControlNets are specified in init, the frames must be passed as a list such
+                that each element of the list can be correctly batched for input to a single ControlNet.
+            height (`int`, *optional*, defaults to the height of the condition frames):
+                The height in pixels of the generated video.
+            width (`int`, *optional*, defaults to the width of the condition frames):
+                The width in pixels of the generated video.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead.
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+            output_type (`str`, *optional*, defaults to `"tensor"`):
+                The output format of the generated video. Choose `"tensor"` for a `torch.Tensor`; any other value
+                returns a `np.ndarray`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
+ corresponding scale as a list.
+ smooth_steps (`List[int]`):
+                Apply the RIFE smoother to the predicted RGB frames at these denoising steps.
+            window_size (`int`):
+ The length of each short clip.
+ Examples:
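+
+        A minimal usage sketch (illustrative only; `pipe` and `frames` are assumptions, as in
+        `__call__` above):
+
+        ```py
+        >>> long_video = pipe.generate_long_video(
+        ...     "a robot dancing in the desert, best quality",
+        ...     video_length=len(frames),
+        ...     frames=frames,
+        ...     window_size=8,
+        ...     smooth_steps=[19, 20],
+        ... ).videos
+        ```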
+
+ Returns:
+            [`ControlVideoPipelineOutput`] or `tuple`:
+            [`ControlVideoPipelineOutput`] if `return_dict` is True, otherwise the generated video
+            (`torch.Tensor` or `np.ndarray`, depending on `output_type`).
+ """
+ # 0. Default height and width to unet
+ height, width = self._default_height_width(height, width, frames)
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ height,
+ width,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ controlnet_conditioning_scale,
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier-free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
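+ # when enabled, the two passes are combined below as
+ # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)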
+
+ if isinstance(self.controlnet, MultiControlNetModel3D) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
+
+ # 3. Encode input prompt
+ prompt_embeds = self._encode_prompt(
+ prompt,
+ device,
+ num_videos_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ )
+
+ # 4. Prepare image
+ if isinstance(self.controlnet, ControlNetModel3D):
+ images = []
+ for i_img in frames:
+ i_img = self.prepare_image(
+ image=i_img,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_videos_per_prompt,
+ num_videos_per_prompt=num_videos_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+ images.append(i_img)
+ frames = torch.stack(images, dim=2) # b x c x f x h x w
+ elif isinstance(self.controlnet, MultiControlNetModel3D):
+ images = []
+ for i_img in frames:
+ i_images = []
+ for ii_img in i_img:
+ ii_img = self.prepare_image(
+ image=ii_img,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_videos_per_prompt,
+ num_videos_per_prompt=num_videos_per_prompt,
+ device=device,
+ dtype=self.controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+
+ i_images.append(ii_img)
+ images.append(torch.stack(i_images, dim=2))
+ frames = images
+ else:
+ raise TypeError(f"Unsupported controlnet type: {type(self.controlnet)}")
+
+ # 5. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.unet.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_videos_per_prompt,
+ num_channels_latents,
+ video_length,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ same_frame_noise=True,
+ )
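+ # `same_frame_noise=True` starts every frame from a shared initial noise latent
+ # (helper defined elsewhere in this class), a common choice for temporal consistency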
+
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # Prepare indices of key frames and interval frames
+ key_frame_indices, inter_frame_list = self.get_slide_window_indices(video_length, window_size)
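+ # key frames anchor the sliding windows; each group of interval frames is later
+ # denoised jointly with its two flanking key frames (step 8.2 below)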
+
+ # Prepare video indices if performing smoothing
+ if len(smooth_steps) > 0:
+ video_indices = np.arange(video_length)
+ zero_indices = video_indices[0::2]
+ one_indices = video_indices[1::2]
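+ # the smoother alternates parity across steps: on even steps the odd-indexed
+ # frames are re-synthesized by interpolating their even neighbors, and vice versa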
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ torch.cuda.empty_cache()
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+ noise_pred = torch.zeros_like(latents)
+ pred_original_sample = torch.zeros_like(latents)
+
+ # 8.1 Key frames
+ # controlnet(s) inference
+ key_down_block_res_samples, key_mid_block_res_sample = self.controlnet(
+ latent_model_input[:, :, key_frame_indices],
+ t,
+ encoder_hidden_states=prompt_embeds,
+ controlnet_cond=frames[:, :, key_frame_indices],
+ conditioning_scale=controlnet_conditioning_scale,
+ return_dict=False,
+ )
+ # predict the noise residual
+ key_noise_pred = self.unet(
+ latent_model_input[:, :, key_frame_indices],
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ down_block_additional_residuals=key_down_block_res_samples,
+ mid_block_additional_residual=key_mid_block_res_sample,
+ inter_frame=False,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = key_noise_pred.chunk(2)
+ noise_pred[:, :, key_frame_indices] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ key_step_dict = self.scheduler.step(noise_pred[:, :, key_frame_indices], t, latents[:, :, key_frame_indices], **extra_step_kwargs)
+ latents[:, :, key_frame_indices] = key_step_dict.prev_sample
+ pred_original_sample[:, :, key_frame_indices] = key_step_dict.pred_original_sample
+
+ # 8.2 compute interval frames
+ for f_i, frame_ids in enumerate(inter_frame_list):
+ input_frame_ids = key_frame_indices[f_i:f_i+2] + frame_ids
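+ # the first two entries are the flanking key frames; only the trailing
+ # interval-frame predictions (sliced as [:, :, 2:] below) are written back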
+ # controlnet(s) inference
+ inter_down_block_res_samples, inter_mid_block_res_sample = self.controlnet(
+ latent_model_input[:, :, input_frame_ids],
+ t,
+ encoder_hidden_states=prompt_embeds,
+ controlnet_cond=frames[:, :, input_frame_ids],
+ conditioning_scale=controlnet_conditioning_scale,
+ return_dict=False,
+ )
+ # predict the noise residual
+ inter_noise_pred = self.unet(
+ latent_model_input[:, :, input_frame_ids],
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=cross_attention_kwargs,
+ down_block_additional_residuals=inter_down_block_res_samples,
+ mid_block_additional_residual=inter_mid_block_res_sample,
+ inter_frame=True,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = inter_noise_pred[:, :, 2:].chunk(2)
+ noise_pred[:, :, frame_ids] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ step_dict = self.scheduler.step(noise_pred[:, :, frame_ids], t, latents[:, :, frame_ids], **extra_step_kwargs)
+ latents[:, :, frame_ids] = step_dict.prev_sample
+ pred_original_sample[:, :, frame_ids] = step_dict.pred_original_sample
+
+ # Smooth videos
+ if (num_inference_steps - i) in smooth_steps:
+ pred_video = self.decode_latents(pred_original_sample, return_tensor=True) # b c f h w
+ pred_video = rearrange(pred_video, "b c f h w -> b f c h w")
+ for b_i in range(len(pred_video)):
+ if i % 2 == 0:
+ for v_i in range(len(zero_indices)-1):
+ s_frame = pred_video[b_i][zero_indices[v_i]].unsqueeze(0)
+ e_frame = pred_video[b_i][zero_indices[v_i+1]].unsqueeze(0)
+ pred_video[b_i][one_indices[v_i]] = self.interpolater.inference(s_frame, e_frame)[0]
+ else:
+ if video_length % 2 == 1:
+ tmp_one_indices = [0] + one_indices.tolist() + [video_length-1]
+ else:
+ tmp_one_indices = [0] + one_indices.tolist()
+ for v_i in range(len(tmp_one_indices)-1):
+ s_frame = pred_video[b_i][tmp_one_indices[v_i]].unsqueeze(0)
+ e_frame = pred_video[b_i][tmp_one_indices[v_i+1]].unsqueeze(0)
+ pred_video[b_i][zero_indices[v_i]] = self.interpolater.inference(s_frame, e_frame)[0]
+ pred_video = rearrange(pred_video, "b f c h w -> (b f) c h w")
+ pred_video = 2.0 * pred_video - 1.0
+ for v_i in range(len(pred_video)):
+ pred_original_sample[:, :, v_i] = self.vae.encode(pred_video[v_i:v_i+1]).latent_dist.sample(generator)
+ pred_original_sample[:, :, v_i] *= self.vae.config.scaling_factor
+
+ # predict x_{t-1} from the smoothed x_0
+ alpha_prod_t_prev = self.get_alpha_prev(t)
+ # keep the deterministic direction pointing to x_t, which preserves more details
+ pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * noise_pred
+ # compute x_{t-1} without the "random noise" term of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
+ latents = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ callback(i, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+ # Post-processing
+ video = self.decode_latents(latents)
+
+ # Convert to tensor
+ if output_type == "tensor":
+ video = torch.from_numpy(video)
+
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.final_offload_hook.offload()
+
+ if not return_dict:
+ return video
+
+ return ControlVideoPipelineOutput(videos=video)
diff --git a/ControlVideo-master/models/resnet.py b/ControlVideo-master/models/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b30f620639f068144fb33c65113d68605135baf
--- /dev/null
+++ b/ControlVideo-master/models/resnet.py
@@ -0,0 +1,217 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from einops import rearrange
+
+
+class InflatedConv3d(nn.Conv2d):
+ def forward(self, x):
+ video_length = x.shape[2]
+
+ x = rearrange(x, "b c f h w -> (b f) c h w")
+ x = super().forward(x)
+ x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
+
+ return x
+
+class TemporalConv1d(nn.Conv1d):
+ def forward(self, x):
+ b, c, f, h, w = x.shape
+ y = rearrange(x.clone(), "b c f h w -> (b h w) c f")
+ y = super().forward(y)
+ y = rearrange(y, "(b h w) c f -> b c f h w", b=b, h=h, w=w)
+ return y
+
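+# A minimal sketch (editor's illustration, unused by the models) of the
+# "inflation" trick above: the frame axis is folded into the batch axis so a
+# pretrained 2D convolution applies per-frame without any new weights.
+def _demo_inflated_conv_shapes():
+ conv = InflatedConv3d(4, 8, kernel_size=3, padding=1)
+ x = torch.randn(2, 4, 16, 32, 32)  # b c f h w
+ y = conv(x)  # internally run as (b*f) separate 2D convolutions
+ assert y.shape == (2, 8, 16, 32, 32)
+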
+
+class Upsample3D(nn.Module):
+ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
+ super().__init__()
+ self.channels = channels
+ self.out_channels = out_channels or channels
+ self.use_conv = use_conv
+ self.use_conv_transpose = use_conv_transpose
+ self.name = name
+
+ conv = None
+ if use_conv_transpose:
+ raise NotImplementedError
+ elif use_conv:
+ conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
+
+ if name == "conv":
+ self.conv = conv
+ else:
+ self.Conv2d_0 = conv
+
+ def forward(self, hidden_states, output_size=None):
+ assert hidden_states.shape[1] == self.channels
+
+ if self.use_conv_transpose:
+ raise NotImplementedError
+
+ # Cast to float32 as the `upsample_nearest2d_out_frame` op does not support bfloat16
+ dtype = hidden_states.dtype
+ if dtype == torch.bfloat16:
+ hidden_states = hidden_states.to(torch.float32)
+
+ # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
+ if hidden_states.shape[0] >= 64:
+ hidden_states = hidden_states.contiguous()
+
+ # if `output_size` is passed we force the interpolation output
+ # size and do not make use of `scale_factor=2`
+ if output_size is None:
+ hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
+ else:
+ hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
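+ # note: the `scale_factor=[1.0, 2.0, 2.0]` path doubles only the spatial dims;
+ # the frame axis is never resized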
+
+ # If the input is bfloat16, we cast back to bfloat16
+ if dtype == torch.bfloat16:
+ hidden_states = hidden_states.to(dtype)
+
+ if self.use_conv:
+ if self.name == "conv":
+ hidden_states = self.conv(hidden_states)
+ else:
+ hidden_states = self.Conv2d_0(hidden_states)
+
+ return hidden_states
+
+
+class Downsample3D(nn.Module):
+ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
+ super().__init__()
+ self.channels = channels
+ self.out_channels = out_channels or channels
+ self.use_conv = use_conv
+ self.padding = padding
+ stride = 2
+ self.name = name
+
+ if use_conv:
+ conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
+ else:
+ raise NotImplementedError
+
+ if name == "conv":
+ self.Conv2d_0 = conv
+ self.conv = conv
+ else:
+ self.conv = conv
+
+ def forward(self, hidden_states):
+ assert hidden_states.shape[1] == self.channels
+ if self.use_conv and self.padding == 0:
+ raise NotImplementedError
+
+ hidden_states = self.conv(hidden_states)
+
+ return hidden_states
+
+
+class ResnetBlock3D(nn.Module):
+ def __init__(
+ self,
+ *,
+ in_channels,
+ out_channels=None,
+ conv_shortcut=False,
+ dropout=0.0,
+ temb_channels=512,
+ groups=32,
+ groups_out=None,
+ pre_norm=True,
+ eps=1e-6,
+ non_linearity="swish",
+ time_embedding_norm="default",
+ output_scale_factor=1.0,
+ use_in_shortcut=None,
+ ):
+ super().__init__()
+ self.pre_norm = pre_norm
+ self.pre_norm = True  # NOTE: pre-norm is always forced on here, overriding the argument
+ self.in_channels = in_channels
+ out_channels = in_channels if out_channels is None else out_channels
+ self.out_channels = out_channels
+ self.use_conv_shortcut = conv_shortcut
+ self.time_embedding_norm = time_embedding_norm
+ self.output_scale_factor = output_scale_factor
+
+ if groups_out is None:
+ groups_out = groups
+
+ self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
+
+ self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
+
+ if temb_channels is not None:
+ if self.time_embedding_norm == "default":
+ time_emb_proj_out_channels = out_channels
+ elif self.time_embedding_norm == "scale_shift":
+ time_emb_proj_out_channels = out_channels * 2
+ else:
+ raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
+
+ self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
+ else:
+ self.time_emb_proj = None
+
+ self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
+ self.dropout = torch.nn.Dropout(dropout)
+ self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
+
+ if non_linearity == "swish":
+ self.nonlinearity = F.silu
+ elif non_linearity == "mish":
+ self.nonlinearity = Mish()
+ elif non_linearity == "silu":
+ self.nonlinearity = nn.SiLU()
+
+ self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
+
+ self.conv_shortcut = None
+ if self.use_in_shortcut:
+ self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+ def forward(self, input_tensor, temb):
+ hidden_states = input_tensor
+
+ hidden_states = self.norm1(hidden_states)
+ hidden_states = self.nonlinearity(hidden_states)
+
+ hidden_states = self.conv1(hidden_states)
+
+ if temb is not None:
+ temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
+
+ if temb is not None and self.time_embedding_norm == "default":
+ hidden_states = hidden_states + temb
+
+ hidden_states = self.norm2(hidden_states)
+
+ if temb is not None and self.time_embedding_norm == "scale_shift":
+ scale, shift = torch.chunk(temb, 2, dim=1)
+ hidden_states = hidden_states * (1 + scale) + shift
+
+ hidden_states = self.nonlinearity(hidden_states)
+
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.conv2(hidden_states)
+
+ if self.conv_shortcut is not None:
+ input_tensor = self.conv_shortcut(input_tensor)
+
+ output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
+
+ return output_tensor
+
+
+class Mish(torch.nn.Module):
+ def forward(self, hidden_states):
+ return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
\ No newline at end of file
diff --git a/ControlVideo-master/models/unet.py b/ControlVideo-master/models/unet.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7684760f2ebb57d9a6a19819e0d3a4b409b83a2
--- /dev/null
+++ b/ControlVideo-master/models/unet.py
@@ -0,0 +1,472 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import os
+import json
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers import ModelMixin
+from diffusers.utils import BaseOutput, logging
+from diffusers.models.embeddings import TimestepEmbedding, Timesteps
+from .unet_blocks import (
+ CrossAttnDownBlock3D,
+ CrossAttnUpBlock3D,
+ DownBlock3D,
+ UNetMidBlock3DCrossAttn,
+ UpBlock3D,
+ get_down_block,
+ get_up_block,
+)
+from .resnet import InflatedConv3d
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class UNet3DConditionOutput(BaseOutput):
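+ # the denoised sample, of shape (batch, channel, frames, height, width)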
+ sample: torch.FloatTensor
+
+
+class UNet3DConditionModel(ModelMixin, ConfigMixin):
+ _supports_gradient_checkpointing = True
+
+ @register_to_config
+ def __init__(
+ self,
+ sample_size: Optional[int] = None,
+ in_channels: int = 4,
+ out_channels: int = 4,
+ center_input_sample: bool = False,
+ flip_sin_to_cos: bool = True,
+ freq_shift: int = 0,
+ down_block_types: Tuple[str] = (
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "DownBlock3D",
+ ),
+ mid_block_type: str = "UNetMidBlock3DCrossAttn",
+ up_block_types: Tuple[str] = (
+ "UpBlock3D",
+ "CrossAttnUpBlock3D",
+ "CrossAttnUpBlock3D",
+ "CrossAttnUpBlock3D"
+ ),
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+ layers_per_block: int = 2,
+ downsample_padding: int = 1,
+ mid_block_scale_factor: float = 1,
+ act_fn: str = "silu",
+ norm_num_groups: int = 32,
+ norm_eps: float = 1e-5,
+ cross_attention_dim: int = 1280,
+ attention_head_dim: Union[int, Tuple[int]] = 8,
+ dual_cross_attention: bool = False,
+ use_linear_projection: bool = False,
+ class_embed_type: Optional[str] = None,
+ num_class_embeds: Optional[int] = None,
+ upcast_attention: bool = False,
+ resnet_time_scale_shift: str = "default",
+ ):
+ super().__init__()
+
+ self.sample_size = sample_size
+ time_embed_dim = block_out_channels[0] * 4
+
+ # input
+ self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
+
+ # time
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
+ timestep_input_dim = block_out_channels[0]
+
+ self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
+
+ # class embedding
+ if class_embed_type is None and num_class_embeds is not None:
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
+ elif class_embed_type == "timestep":
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
+ elif class_embed_type == "identity":
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
+ else:
+ self.class_embedding = None
+
+ self.down_blocks = nn.ModuleList([])
+ self.mid_block = None
+ self.up_blocks = nn.ModuleList([])
+
+ if isinstance(only_cross_attention, bool):
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
+
+ if isinstance(attention_head_dim, int):
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
+
+ # down
+ output_channel = block_out_channels[0]
+ for i, down_block_type in enumerate(down_block_types):
+ input_channel = output_channel
+ output_channel = block_out_channels[i]
+ is_final_block = i == len(block_out_channels) - 1
+
+ down_block = get_down_block(
+ down_block_type,
+ num_layers=layers_per_block,
+ in_channels=input_channel,
+ out_channels=output_channel,
+ temb_channels=time_embed_dim,
+ add_downsample=not is_final_block,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attention_head_dim[i],
+ downsample_padding=downsample_padding,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention[i],
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ self.down_blocks.append(down_block)
+
+ # mid
+ if mid_block_type == "UNetMidBlock3DCrossAttn":
+ self.mid_block = UNetMidBlock3DCrossAttn(
+ in_channels=block_out_channels[-1],
+ temb_channels=time_embed_dim,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ output_scale_factor=mid_block_scale_factor,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attention_head_dim[-1],
+ resnet_groups=norm_num_groups,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ upcast_attention=upcast_attention,
+ )
+ else:
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
+
+ # count how many layers upsample the videos
+ self.num_upsamplers = 0
+
+ # up
+ reversed_block_out_channels = list(reversed(block_out_channels))
+ reversed_attention_head_dim = list(reversed(attention_head_dim))
+ only_cross_attention = list(reversed(only_cross_attention))
+ output_channel = reversed_block_out_channels[0]
+ for i, up_block_type in enumerate(up_block_types):
+ is_final_block = i == len(block_out_channels) - 1
+
+ prev_output_channel = output_channel
+ output_channel = reversed_block_out_channels[i]
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
+
+ # add upsample block for all BUT final layer
+ if not is_final_block:
+ add_upsample = True
+ self.num_upsamplers += 1
+ else:
+ add_upsample = False
+
+ up_block = get_up_block(
+ up_block_type,
+ num_layers=layers_per_block + 1,
+ in_channels=input_channel,
+ out_channels=output_channel,
+ prev_output_channel=prev_output_channel,
+ temb_channels=time_embed_dim,
+ add_upsample=add_upsample,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=reversed_attention_head_dim[i],
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention[i],
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ self.up_blocks.append(up_block)
+ prev_output_channel = output_channel
+
+ # out
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
+ self.conv_act = nn.SiLU()
+ self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
+
+ def set_attention_slice(self, slice_size):
+ r"""
+ Enable sliced attention computation.
+
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+ Args:
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+ `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
+ must be a multiple of `slice_size`.
+ """
+ sliceable_head_dims = []
+
+ def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
+ if hasattr(module, "set_attention_slice"):
+ sliceable_head_dims.append(module.sliceable_head_dim)
+
+ for child in module.children():
+ fn_recursive_retrieve_slicable_dims(child)
+
+ # retrieve number of attention layers
+ for module in self.children():
+ fn_recursive_retrieve_slicable_dims(module)
+
+ num_slicable_layers = len(sliceable_head_dims)
+
+ if slice_size == "auto":
+ # half the attention head size is usually a good trade-off between
+ # speed and memory
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
+ elif slice_size == "max":
+ # make smallest slice possible
+ slice_size = num_slicable_layers * [1]
+
+ slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
+
+ if len(slice_size) != len(sliceable_head_dims):
+ raise ValueError(
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
+ )
+
+ for i in range(len(slice_size)):
+ size = slice_size[i]
+ dim = sliceable_head_dims[i]
+ if size is not None and size > dim:
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
+
+ # Recursively walk through all the children.
+ # Any children which exposes the set_attention_slice method
+ # gets the message
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
+ if hasattr(module, "set_attention_slice"):
+ module.set_attention_slice(slice_size.pop())
+
+ for child in module.children():
+ fn_recursive_set_attention_slice(child, slice_size)
+
+ reversed_slice_size = list(reversed(slice_size))
+ for module in self.children():
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
+ module.gradient_checkpointing = value
+
+ def forward(
+ self,
+ sample: torch.FloatTensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: torch.Tensor,
+ class_labels: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ return_dict: bool = True,
+ cross_attention_kwargs = None,
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
+ inter_frame = False,
+ ) -> Union[UNet3DConditionOutput, Tuple]:
+ r"""
+ Args:
+ sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
+ timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
+ encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
+
+ Returns:
+ [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
+ [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
+ returning a tuple, the first element is the sample tensor.
+ """
+ # By default samples have to be at least a multiple of the overall upsampling factor.
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
+ # on the fly if necessary.
+ default_overall_up_factor = 2**self.num_upsamplers
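+ # e.g. with the default four blocks there are 3 upsamplers, so height and width
+ # must be divisible by 2**3 = 8 unless an explicit upsample size is forwarded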
+
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
+ forward_upsample_size = False
+ upsample_size = None
+
+ if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
+ logger.info("Forward upsample size to force interpolation output size.")
+ forward_upsample_size = True
+
+ # prepare attention_mask
+ if attention_mask is not None:
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+ attention_mask = attention_mask.unsqueeze(1)
+
+ # center input if necessary
+ if self.config.center_input_sample:
+ sample = 2 * sample - 1.0
+
+ # time
+ timesteps = timestep
+ if not torch.is_tensor(timesteps):
+ # This would be a good case for the `match` statement (Python 3.10+)
+ is_mps = sample.device.type == "mps"
+ if isinstance(timestep, float):
+ dtype = torch.float32 if is_mps else torch.float64
+ else:
+ dtype = torch.int32 if is_mps else torch.int64
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+ elif len(timesteps.shape) == 0:
+ timesteps = timesteps[None].to(sample.device)
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timesteps = timesteps.expand(sample.shape[0])
+
+ t_emb = self.time_proj(timesteps)
+
+ # timesteps does not contain any weights and will always return f32 tensors
+ # but time_embedding might actually be running in fp16. so we need to cast here.
+ # there might be better ways to encapsulate this.
+ t_emb = t_emb.to(dtype=self.dtype)
+ emb = self.time_embedding(t_emb)
+
+ if self.class_embedding is not None:
+ if class_labels is None:
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+ if self.config.class_embed_type == "timestep":
+ class_labels = self.time_proj(class_labels)
+
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
+ emb = emb + class_emb
+
+ # pre-process
+ sample = self.conv_in(sample)
+
+ # down
+ down_block_res_samples = (sample,)
+ for downsample_block in self.down_blocks:
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+ sample, res_samples = downsample_block(
+ hidden_states=sample,
+ temb=emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ inter_frame=inter_frame
+ )
+ else:
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+
+ down_block_res_samples += res_samples
+
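+ # ControlNet conditioning: per-resolution residuals from the ControlNet branch
+ # are summed into the matching skip connections here; its mid-block residual is
+ # added right after the mid block below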
+ if down_block_additional_residuals is not None:
+ new_down_block_res_samples = ()
+
+ for down_block_res_sample, down_block_additional_residual in zip(
+ down_block_res_samples, down_block_additional_residuals
+ ):
+ down_block_res_sample += down_block_additional_residual
+ new_down_block_res_samples += (down_block_res_sample,)
+
+ down_block_res_samples = new_down_block_res_samples
+
+ # mid
+ sample = self.mid_block(
+ sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask,
+ inter_frame=inter_frame
+ )
+
+ if mid_block_additional_residual is not None:
+ sample += mid_block_additional_residual
+
+ # up
+ for i, upsample_block in enumerate(self.up_blocks):
+ is_final_block = i == len(self.up_blocks) - 1
+
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+ # if we have not reached the final block and need to forward the
+ # upsample size, we do it here
+ if not is_final_block and forward_upsample_size:
+ upsample_size = down_block_res_samples[-1].shape[2:]
+
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
+ sample = upsample_block(
+ hidden_states=sample,
+ temb=emb,
+ res_hidden_states_tuple=res_samples,
+ encoder_hidden_states=encoder_hidden_states,
+ upsample_size=upsample_size,
+ attention_mask=attention_mask,
+ inter_frame=inter_frame
+ )
+ else:
+ sample = upsample_block(
+ hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
+ )
+ # post-process
+ sample = self.conv_norm_out(sample)
+ sample = self.conv_act(sample)
+ sample = self.conv_out(sample)
+
+ if not return_dict:
+ return (sample,)
+
+ return UNet3DConditionOutput(sample=sample)
+
+ @classmethod
+ def from_pretrained_2d(cls, pretrained_model_path, subfolder=None):
+ if subfolder is not None:
+ pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
+
+ config_file = os.path.join(pretrained_model_path, 'config.json')
+ if not os.path.isfile(config_file):
+ raise RuntimeError(f"{config_file} does not exist")
+ with open(config_file, "r") as f:
+ config = json.load(f)
+ config["_class_name"] = cls.__name__
+ config["down_block_types"] = [
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "CrossAttnDownBlock3D",
+ "DownBlock3D"
+ ]
+ config["up_block_types"] = [
+ "UpBlock3D",
+ "CrossAttnUpBlock3D",
+ "CrossAttnUpBlock3D",
+ "CrossAttnUpBlock3D"
+ ]
+
+ from diffusers.utils import WEIGHTS_NAME
+ model = cls.from_config(config)
+ model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
+ if not os.path.isfile(model_file):
+ raise RuntimeError(f"{model_file} does not exist")
+ state_dict = torch.load(model_file, map_location="cpu")
+ # for k, v in model.state_dict().items():
+ # if '_temp.' in k:
+ # state_dict.update({k: v})
+ model.load_state_dict(state_dict, strict=False)
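+ # strict=False: the 2D weights load into the matching inflated modules, while
+ # any parameters new to the 3D model keep their fresh initialization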
+
+ return model
\ No newline at end of file
diff --git a/ControlVideo-master/models/unet_blocks.py b/ControlVideo-master/models/unet_blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed5015ba1fdb949dd2bce3f6df5fa01828aff48e
--- /dev/null
+++ b/ControlVideo-master/models/unet_blocks.py
@@ -0,0 +1,588 @@
+# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
+
+import torch
+from torch import nn
+
+from .attention import Transformer3DModel
+from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
+
+
+def get_down_block(
+ down_block_type,
+ num_layers,
+ in_channels,
+ out_channels,
+ temb_channels,
+ add_downsample,
+ resnet_eps,
+ resnet_act_fn,
+ attn_num_head_channels,
+ resnet_groups=None,
+ cross_attention_dim=None,
+ downsample_padding=None,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ resnet_time_scale_shift="default",
+):
+ down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
+ if down_block_type == "DownBlock3D":
+ return DownBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ add_downsample=add_downsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ downsample_padding=downsample_padding,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ elif down_block_type == "CrossAttnDownBlock3D":
+ if cross_attention_dim is None:
+ raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
+ return CrossAttnDownBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ add_downsample=add_downsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ downsample_padding=downsample_padding,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attn_num_head_channels,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ raise ValueError(f"{down_block_type} does not exist.")
+
+
+def get_up_block(
+ up_block_type,
+ num_layers,
+ in_channels,
+ out_channels,
+ prev_output_channel,
+ temb_channels,
+ add_upsample,
+ resnet_eps,
+ resnet_act_fn,
+ attn_num_head_channels,
+ resnet_groups=None,
+ cross_attention_dim=None,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ resnet_time_scale_shift="default",
+):
+ up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
+ if up_block_type == "UpBlock3D":
+ return UpBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ prev_output_channel=prev_output_channel,
+ temb_channels=temb_channels,
+ add_upsample=add_upsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ elif up_block_type == "CrossAttnUpBlock3D":
+ if cross_attention_dim is None:
+ raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
+ return CrossAttnUpBlock3D(
+ num_layers=num_layers,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ prev_output_channel=prev_output_channel,
+ temb_channels=temb_channels,
+ add_upsample=add_upsample,
+ resnet_eps=resnet_eps,
+ resnet_act_fn=resnet_act_fn,
+ resnet_groups=resnet_groups,
+ cross_attention_dim=cross_attention_dim,
+ attn_num_head_channels=attn_num_head_channels,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ )
+ raise ValueError(f"{up_block_type} does not exist.")
+
+
+class UNetMidBlock3DCrossAttn(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ output_scale_factor=1.0,
+ cross_attention_dim=1280,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
+
+ # there is always at least one resnet
+ resnets = [
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ ]
+ attentions = []
+
+ for _ in range(num_layers):
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ in_channels // attn_num_head_channels,
+ in_channels=in_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ upcast_attention=upcast_attention,
+ )
+ )
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, inter_frame=False):
+ hidden_states = self.resnets[0](hidden_states, temb)
+ for attn, resnet in zip(self.attentions, self.resnets[1:]):
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, inter_frame=inter_frame).sample
+ hidden_states = resnet(hidden_states, temb)
+
+ return hidden_states
+
+
+class CrossAttnDownBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ cross_attention_dim=1280,
+ output_scale_factor=1.0,
+ downsample_padding=1,
+ add_downsample=True,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+ resnets = []
+ attentions = []
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+
+ for i in range(num_layers):
+ in_channels = in_channels if i == 0 else out_channels
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ out_channels // attn_num_head_channels,
+ in_channels=out_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ )
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_downsample:
+ self.downsamplers = nn.ModuleList(
+ [
+ Downsample3D(
+ out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
+ )
+ ]
+ )
+ else:
+ self.downsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, inter_frame=False):
+ output_states = ()
+
+ for resnet, attn in zip(self.resnets, self.attentions):
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module, return_dict=None, inter_frame=None):
+ def custom_forward(*inputs):
+ if return_dict is not None:
+ return module(*inputs, return_dict=return_dict, inter_frame=inter_frame)
+ else:
+ return module(*inputs)
+
+ return custom_forward
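+ # checkpoint() only replays positional tensor arguments through the wrapped
+ # callable, so non-tensor options (`return_dict`, `inter_frame`) are bound
+ # via the closure above instead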
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(attn, return_dict=False, inter_frame=inter_frame),
+ hidden_states,
+ encoder_hidden_states,
+ )[0]
+ else:
+ hidden_states = resnet(hidden_states, temb)
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, inter_frame=inter_frame).sample
+
+ output_states += (hidden_states,)
+
+ if self.downsamplers is not None:
+ for downsampler in self.downsamplers:
+ hidden_states = downsampler(hidden_states)
+
+ output_states += (hidden_states,)
+
+ return hidden_states, output_states
+
+
+class DownBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ output_scale_factor=1.0,
+ add_downsample=True,
+ downsample_padding=1,
+ ):
+ super().__init__()
+ resnets = []
+
+ for i in range(num_layers):
+ in_channels = in_channels if i == 0 else out_channels
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_downsample:
+ self.downsamplers = nn.ModuleList(
+ [
+ Downsample3D(
+ out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
+ )
+ ]
+ )
+ else:
+ self.downsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(self, hidden_states, temb=None):
+ output_states = ()
+
+ for resnet in self.resnets:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ else:
+ hidden_states = resnet(hidden_states, temb)
+
+ output_states += (hidden_states,)
+
+ if self.downsamplers is not None:
+ for downsampler in self.downsamplers:
+ hidden_states = downsampler(hidden_states)
+
+ output_states += (hidden_states,)
+
+ return hidden_states, output_states
+
+
+class CrossAttnUpBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ prev_output_channel: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ attn_num_head_channels=1,
+ cross_attention_dim=1280,
+ output_scale_factor=1.0,
+ add_upsample=True,
+ dual_cross_attention=False,
+ use_linear_projection=False,
+ only_cross_attention=False,
+ upcast_attention=False,
+ ):
+ super().__init__()
+ resnets = []
+ attentions = []
+
+ self.has_cross_attention = True
+ self.attn_num_head_channels = attn_num_head_channels
+
+ for i in range(num_layers):
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
+
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=resnet_in_channels + res_skip_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+ if dual_cross_attention:
+ raise NotImplementedError
+ attentions.append(
+ Transformer3DModel(
+ attn_num_head_channels,
+ out_channels // attn_num_head_channels,
+ in_channels=out_channels,
+ num_layers=1,
+ cross_attention_dim=cross_attention_dim,
+ norm_num_groups=resnet_groups,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention,
+ upcast_attention=upcast_attention,
+ )
+ )
+
+ self.attentions = nn.ModuleList(attentions)
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_upsample:
+ self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
+ else:
+ self.upsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ res_hidden_states_tuple,
+ temb=None,
+ encoder_hidden_states=None,
+ upsample_size=None,
+ attention_mask=None,
+ inter_frame=False
+ ):
+ for resnet, attn in zip(self.resnets, self.attentions):
+ # pop res hidden states
+ res_hidden_states = res_hidden_states_tuple[-1]
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module, return_dict=None, inter_frame=None):
+ def custom_forward(*inputs):
+ if return_dict is not None:
+ return module(*inputs, return_dict=return_dict, inter_frame=inter_frame)
+ else:
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(attn, return_dict=False, inter_frame=inter_frame),
+ hidden_states,
+ encoder_hidden_states,
+ )[0]
+ else:
+ hidden_states = resnet(hidden_states, temb)
+ hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, inter_frame=inter_frame).sample
+
+ if self.upsamplers is not None:
+ for upsampler in self.upsamplers:
+ hidden_states = upsampler(hidden_states, upsample_size)
+
+ return hidden_states
+
+
+class UpBlock3D(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ prev_output_channel: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_time_scale_shift: str = "default",
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ resnet_pre_norm: bool = True,
+ output_scale_factor=1.0,
+ add_upsample=True,
+ ):
+ super().__init__()
+ resnets = []
+
+ for i in range(num_layers):
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
+
+ resnets.append(
+ ResnetBlock3D(
+ in_channels=resnet_in_channels + res_skip_channels,
+ out_channels=out_channels,
+ temb_channels=temb_channels,
+ eps=resnet_eps,
+ groups=resnet_groups,
+ dropout=dropout,
+ time_embedding_norm=resnet_time_scale_shift,
+ non_linearity=resnet_act_fn,
+ output_scale_factor=output_scale_factor,
+ pre_norm=resnet_pre_norm,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+
+ if add_upsample:
+ self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
+ else:
+ self.upsamplers = None
+
+ self.gradient_checkpointing = False
+
+ def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
+ for resnet in self.resnets:
+ # pop res hidden states
+ res_hidden_states = res_hidden_states_tuple[-1]
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
+ else:
+ hidden_states = resnet(hidden_states, temb)
+
+ if self.upsamplers is not None:
+ for upsampler in self.upsamplers:
+ hidden_states = upsampler(hidden_states, upsample_size)
+
+ return hidden_states
diff --git a/ControlVideo-master/models/util.py b/ControlVideo-master/models/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..faba28d79fc80c2786872e2d9fa7edb267b18949
--- /dev/null
+++ b/ControlVideo-master/models/util.py
@@ -0,0 +1,122 @@
+import os
+import imageio
+import numpy as np
+from typing import Union
+import decord
+decord.bridge.set_bridge('torch')
+import torch
+import torchvision
+import PIL
+from typing import List
+from tqdm import tqdm
+from einops import rearrange
+
+from controlnet_aux import CannyDetector
+
+def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=4, fps=8):
+ videos = rearrange(videos, "b c t h w -> t b c h w")
+ outputs = []
+ for x in videos:
+ x = torchvision.utils.make_grid(x, nrow=n_rows)
+ x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
+ if rescale:
+ x = (x + 1.0) / 2.0 # -1,1 -> 0,1
+ x = (x * 255).numpy().astype(np.uint8)
+ outputs.append(x)
+
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ imageio.mimsave(path, outputs, fps=fps)
+
+def save_videos_grid_pil(videos: List[PIL.Image.Image], path: str, rescale=False, n_rows=4, fps=8):
+ # NOTE: despite the type annotation, this mirrors `save_videos_grid` and expects a tensor-like input
+ videos = rearrange(videos, "b c t h w -> t b c h w")
+ outputs = []
+ for x in videos:
+ x = torchvision.utils.make_grid(x, nrow=n_rows)
+ x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
+ if rescale:
+ x = (x + 1.0) / 2.0 # -1,1 -> 0,1
+ x = (x * 255).numpy().astype(np.uint8)
+ outputs.append(x)
+
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ imageio.mimsave(path, outputs, fps=fps)
+
+def read_video(video_path, video_length, width=512, height=512, frame_rate=None):
+ vr = decord.VideoReader(video_path, width=width, height=height)
+ if frame_rate is None:
+ frame_rate = max(1, len(vr) // video_length)
+ sample_index = list(range(0, len(vr), frame_rate))[:video_length]
+ video = vr.get_batch(sample_index)
+ video = rearrange(video, "f h w c -> f c h w")
+ video = (video / 127.5 - 1.0)
+ return video
+
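+# e.g. read_video("clip.mp4", video_length=16) samples 16 frames at a fixed
+# stride (`frame_rate` acts as a sampling stride, not an fps) and returns a
+# float tensor of shape (16, 3, 512, 512) scaled to [-1, 1]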
+
+def get_annotation(video, annotator):
+ t2i_transform = torchvision.transforms.ToPILImage()
+ annotation = []
+ for frame in video:
+ pil_frame = t2i_transform(frame)
+ if isinstance(annotator, CannyDetector):
+ annotation.append(annotator(pil_frame, low_threshold=100, high_threshold=200))
+ else:
+ annotation.append(annotator(pil_frame))
+ return annotation
+
+# DDIM Inversion
+@torch.no_grad()
+def init_prompt(prompt, pipeline):
+ uncond_input = pipeline.tokenizer(
+ [""], padding="max_length", max_length=pipeline.tokenizer.model_max_length,
+ return_tensors="pt"
+ )
+ uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
+ text_input = pipeline.tokenizer(
+ [prompt],
+ padding="max_length",
+ max_length=pipeline.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
+ context = torch.cat([uncond_embeddings, text_embeddings])
+
+ return context
+
+
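+# One inverse DDIM step: from x_t and the predicted noise, reconstruct x_0 and
+# step deterministically to x_{t+1} (formula (12) of
+# https://arxiv.org/pdf/2010.02502.pdf run in reverse)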
+def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
+ sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
+ timestep, next_timestep = min(
+ timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
+ alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
+ alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
+ beta_prod_t = 1 - alpha_prod_t
+ next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
+ next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
+ next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
+ return next_sample
+
+
+def get_noise_pred_single(latents, t, context, unet):
+ noise_pred = unet(latents, t, encoder_hidden_states=context)["sample"]
+ return noise_pred
+
+
+@torch.no_grad()
+def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
+ context = init_prompt(prompt, pipeline)
+ uncond_embeddings, cond_embeddings = context.chunk(2)
+ all_latent = [latent]
+ latent = latent.clone().detach()
+ for i in tqdm(range(num_inv_steps)):
+ t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
+ noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
+ latent = next_step(noise_pred, t, latent, ddim_scheduler)
+ all_latent.append(latent)
+ return all_latent
+
+
+@torch.no_grad()
+def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
+ ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
+ return ddim_latents
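+
+# Typical (hypothetical) use: invert clean video latents so that sampling can
+# start from content-preserving noise, e.g.
+# inv_latents = ddim_inversion(pipe, ddim_scheduler, video_latent, num_inv_steps=50)[-1]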