diff --git a/FateZero-main/data/negative_reg/bird/198-dfdcf603f0d6dd9a790f4d6a658032b117abf50e.jpg b/FateZero-main/data/negative_reg/bird/198-dfdcf603f0d6dd9a790f4d6a658032b117abf50e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6bb0d48ab40f658a38a479b55821d5dd19b72dbb
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/198-dfdcf603f0d6dd9a790f4d6a658032b117abf50e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:874f6102f2c00a522c60d5fa2431c29b7898457c15db62d04e5dd4807e52ebf4
+size 69670
diff --git a/FateZero-main/data/negative_reg/bird/347-defdc0fea089f7e2d6b9cb296eca877dd5f7a7d2.jpg b/FateZero-main/data/negative_reg/bird/347-defdc0fea089f7e2d6b9cb296eca877dd5f7a7d2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f3697e512d08b003709994a97c3528e738400c10
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/347-defdc0fea089f7e2d6b9cb296eca877dd5f7a7d2.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:008df3061807a6e99772c29e98f087807a8e4470c0ad33528d3cb8398416c7be
+size 32933
diff --git a/FateZero-main/data/negative_reg/bird/359-b6be4d41cabadb5c0f5ae7a3b5aa8c87d2811b25.jpg b/FateZero-main/data/negative_reg/bird/359-b6be4d41cabadb5c0f5ae7a3b5aa8c87d2811b25.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bcafabe09124139f6766ba43b04b5f0e4decf244
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/359-b6be4d41cabadb5c0f5ae7a3b5aa8c87d2811b25.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70ba62f8d0af506a492523e4a787519395cc166c4c7c4d41626d86a942f28649
+size 71247
diff --git a/FateZero-main/data/negative_reg/bird/405-2b8123d16a14b5e322c9a7e9cdff6b5620de744e.jpg b/FateZero-main/data/negative_reg/bird/405-2b8123d16a14b5e322c9a7e9cdff6b5620de744e.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..71876d4edfd7a30e8fe7c712efd7c0d1af7fb65b
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/405-2b8123d16a14b5e322c9a7e9cdff6b5620de744e.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f72522d56c1a16b6745a7ac620ef5062d326b86167a07776801fe07ba10a233
+size 37268
diff --git a/FateZero-main/data/negative_reg/bird/452-edfc79bf16e82da5c65e4f814947229096dec56b.jpg b/FateZero-main/data/negative_reg/bird/452-edfc79bf16e82da5c65e4f814947229096dec56b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b26d6aa445862036882e52901e856e0c7252730a
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/452-edfc79bf16e82da5c65e4f814947229096dec56b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdc3bcedcfef88bec2789354f52d52389dc08979e741a1441e794e54150f6431
+size 34736
diff --git a/FateZero-main/data/negative_reg/bird/490-6586977261049294340e1f2ca63a704b95742218.jpg b/FateZero-main/data/negative_reg/bird/490-6586977261049294340e1f2ca63a704b95742218.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..841b11d1397d7108cc9b3c54b8bce86aae23f580
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/490-6586977261049294340e1f2ca63a704b95742218.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:283f1351ff4e2b1cea9ed6e31e15bc360466f327110e7d5a7c5e94c3dafd9654
+size 19675
diff --git a/FateZero-main/data/negative_reg/bird/529-5e8face1afa7450de032369b3a378874bd526cc4.jpg b/FateZero-main/data/negative_reg/bird/529-5e8face1afa7450de032369b3a378874bd526cc4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..22460632d6add115fa49455d7ce4b7e80cf7fa7a
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/529-5e8face1afa7450de032369b3a378874bd526cc4.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:390f48fab998bd7fddb657c3a593486a83336b14ab2299deb97501fb93797911
+size 24944
diff --git a/FateZero-main/data/negative_reg/bird/551-76631a31d87d5384757e68066843bb8655577148.jpg b/FateZero-main/data/negative_reg/bird/551-76631a31d87d5384757e68066843bb8655577148.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6891cf4a680a13ad2de521fb2ff824884c0b8e6b
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/551-76631a31d87d5384757e68066843bb8655577148.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c379279a21c46891c414d914f6c9a3b5f8df882c812858464945fb9787604a7
+size 19431
diff --git a/FateZero-main/data/negative_reg/bird/66-1e78a8e81d922e992f0e0968cd751d2541b2f130.jpg b/FateZero-main/data/negative_reg/bird/66-1e78a8e81d922e992f0e0968cd751d2541b2f130.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cc482669dfaaee005e83f957abad4815e2276461
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/66-1e78a8e81d922e992f0e0968cd751d2541b2f130.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ad44ab5a74626513dd42bde66bbecb1f08c26e9398f6e1dd0ee317160d81f96
+size 44479
diff --git a/FateZero-main/data/negative_reg/bird/8-158ad1d17e579bab9d48cd69025d9cfce31266fa.jpg b/FateZero-main/data/negative_reg/bird/8-158ad1d17e579bab9d48cd69025d9cfce31266fa.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4304997a0926e08421e100f2edcc7c1bf5a7e5d9
--- /dev/null
+++ b/FateZero-main/data/negative_reg/bird/8-158ad1d17e579bab9d48cd69025d9cfce31266fa.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9deb95a6c7fa33cb4f26a004f9f662f2ea7c9cd27c519621df6fa075f00e981
+size 19415
diff --git a/FateZero-main/data/negative_reg/car/132-7102babd6a1810bd7e1dc01a2241d51ceeeb44dc.jpg b/FateZero-main/data/negative_reg/car/132-7102babd6a1810bd7e1dc01a2241d51ceeeb44dc.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f40422b16cf275aad2dbdc14603b0b59c42a3bc2
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/132-7102babd6a1810bd7e1dc01a2241d51ceeeb44dc.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fbbf251d59c75bb5bfb35a09046b79005df2cc04f0ba5bfa6447c376fdcf51b
+size 34880
diff --git a/FateZero-main/data/negative_reg/car/135-c8ae1edb43f1ceeb0ea05b94a595d6bc8b2798c6.jpg b/FateZero-main/data/negative_reg/car/135-c8ae1edb43f1ceeb0ea05b94a595d6bc8b2798c6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d44abae13faea0bc9b5abd80dc9da8ddc8bd2380
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/135-c8ae1edb43f1ceeb0ea05b94a595d6bc8b2798c6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f416223cc2e30808ef8ec014aa4282922d82226cb6449fc7a95115beb61cae6
+size 63066
diff --git a/FateZero-main/data/negative_reg/car/137-59d324538c4873d14fb45a7c52a8a54ce90ff4d5.jpg b/FateZero-main/data/negative_reg/car/137-59d324538c4873d14fb45a7c52a8a54ce90ff4d5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b3729fe9b92d6cfb6d08c72ee3270ca97dc7117d
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/137-59d324538c4873d14fb45a7c52a8a54ce90ff4d5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffebd4a466c1d8b1a593fe391414a65bad5fd4f46122b1c83fb663bfd720e8a3
+size 59377
diff --git a/FateZero-main/data/negative_reg/car/222-beff21bebbbb9dabd49517cbd4522791953b688b.jpg b/FateZero-main/data/negative_reg/car/222-beff21bebbbb9dabd49517cbd4522791953b688b.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cfe0a74305dc6d796f940be5499a47d852c779df
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/222-beff21bebbbb9dabd49517cbd4522791953b688b.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e29c50a880c22f528414442c43d4e44644c879ed12b644bf128b0d9f791b8f60
+size 61301
diff --git a/FateZero-main/data/negative_reg/car/246-1aa12ea27561eaa19320fa4f057c420afd795453.jpg b/FateZero-main/data/negative_reg/car/246-1aa12ea27561eaa19320fa4f057c420afd795453.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..92935d02b0340373e20f693cfbef66eebf29c396
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/246-1aa12ea27561eaa19320fa4f057c420afd795453.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8d82b925f4c3361e0fe9154459c57527a9ff713f3dc90a389aa1411c3579ab0
+size 62901
diff --git a/FateZero-main/data/negative_reg/car/257-0bcf1c0411420992816ac3269cc04f9dfc828cee.jpg b/FateZero-main/data/negative_reg/car/257-0bcf1c0411420992816ac3269cc04f9dfc828cee.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..42d2ffa7af923037e2c54b360a85ccd59e16a930
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/257-0bcf1c0411420992816ac3269cc04f9dfc828cee.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4a600ff6f8f19c4105adb69822719a72949588bad8e1afdc8a63e155c9a59fd
+size 48205
diff --git a/FateZero-main/data/negative_reg/car/331-93e1c2cb142929baeab22fc994290d7315cd8f40.jpg b/FateZero-main/data/negative_reg/car/331-93e1c2cb142929baeab22fc994290d7315cd8f40.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..20da97dcb2062ab9c8b381d0a9821d6c0f4d7c5f
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/331-93e1c2cb142929baeab22fc994290d7315cd8f40.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7afd83249f9eb1114de6deae21efb0ac44eac1461914953346a108ba26c85f28
+size 62155
diff --git a/FateZero-main/data/negative_reg/car/34-f49ef63f40978ced5363df3ba47201bacfe41dec.jpg b/FateZero-main/data/negative_reg/car/34-f49ef63f40978ced5363df3ba47201bacfe41dec.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d48a0fac2046e0505f8f224a50346dc53a576628
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/34-f49ef63f40978ced5363df3ba47201bacfe41dec.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3209c18e7c87a3811a9c49a51cd4f446ae555ba1d5b3a81b8a9abe9a8c640e59
+size 56985
diff --git a/FateZero-main/data/negative_reg/car/357-56fc8ca34173c7321b5564e29bd57ae47d78b2aa.jpg b/FateZero-main/data/negative_reg/car/357-56fc8ca34173c7321b5564e29bd57ae47d78b2aa.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a45cb39461513d8b447b05ba2d3d68afbecada87
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/357-56fc8ca34173c7321b5564e29bd57ae47d78b2aa.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35a9368b2d85119b17ed4457cf41f7f9f78f7250094a234f1e5fef7306d8d896
+size 61904
diff --git a/FateZero-main/data/negative_reg/car/361-a64d1b37c755ef754d2b7c27e9f7e156da6bc576.jpg b/FateZero-main/data/negative_reg/car/361-a64d1b37c755ef754d2b7c27e9f7e156da6bc576.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..39e289b948123607a1c1eac6fd24ea0076355264
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/361-a64d1b37c755ef754d2b7c27e9f7e156da6bc576.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bba6d8b655f4d0bf333514be5b968578e1437647bdbf181c6ce72ac31be1161d
+size 70214
diff --git a/FateZero-main/data/negative_reg/car/426-5bbfca82723b7804db51fc7705400610b6885f99.jpg b/FateZero-main/data/negative_reg/car/426-5bbfca82723b7804db51fc7705400610b6885f99.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ceee1f6dd7373eaeec7e743fbe76e930e3d8669d
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/426-5bbfca82723b7804db51fc7705400610b6885f99.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a8a2af8bb5b52192e8e767e742b25cff69a8397e013fa27b6bfd58a526accd8
+size 74638
diff --git a/FateZero-main/data/negative_reg/car/542-0b9afd5a396cf38cceb5affafac671d9a84f1130.jpg b/FateZero-main/data/negative_reg/car/542-0b9afd5a396cf38cceb5affafac671d9a84f1130.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..38f79d2c896764ddac1f3efbc3cd1b6f6e8a1da9
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/542-0b9afd5a396cf38cceb5affafac671d9a84f1130.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:413c41dd7bf66433827492d90881b44f903958abbe22d911af810930fd56bf93
+size 45071
diff --git a/FateZero-main/data/negative_reg/car/576-2f9d3df0358dc7c8b0b0b40d2a7d63995432ffa5.jpg b/FateZero-main/data/negative_reg/car/576-2f9d3df0358dc7c8b0b0b40d2a7d63995432ffa5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bdcbcfcab6608d45b0aaf8d27b8edb4db8e01b90
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/576-2f9d3df0358dc7c8b0b0b40d2a7d63995432ffa5.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:387c8fc1e92543d2a5a0f0c47cca3ae943b15612c6a70259f8396691fb0842f4
+size 64732
diff --git a/FateZero-main/data/negative_reg/car/656-1e8349c458b6cb470804cd09c6dac39a97cdef25.jpg b/FateZero-main/data/negative_reg/car/656-1e8349c458b6cb470804cd09c6dac39a97cdef25.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fcb460ec4499eb6f45fafd63f93395d3eb70c176
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/656-1e8349c458b6cb470804cd09c6dac39a97cdef25.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ea1ca1c392d43dbd772edcbb0ac61d1b6894e7dad091d4d618bbb6a9fa13c0e
+size 62266
diff --git a/FateZero-main/data/negative_reg/car/675-86bf57b2da85f2912d9ed793e92f9140eb276c17.jpg b/FateZero-main/data/negative_reg/car/675-86bf57b2da85f2912d9ed793e92f9140eb276c17.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eb9d965ad2e636c4f3dd2b327d182152bab28950
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/675-86bf57b2da85f2912d9ed793e92f9140eb276c17.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b127eef7eb2e46808e94ff0473ff1adc9148fbe3992f076c225d3c53bd2637aa
+size 55694
diff --git a/FateZero-main/data/negative_reg/car/678-60e3c9248c8eb7cb912a74071e0b9cecfbb05f45.jpg b/FateZero-main/data/negative_reg/car/678-60e3c9248c8eb7cb912a74071e0b9cecfbb05f45.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..45b05aaea0332ebfd33686851ac4c5bd58816d3e
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/678-60e3c9248c8eb7cb912a74071e0b9cecfbb05f45.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ac20dcc704b3f30bdb07bedd92e48f9503b75a86d6cd17c848ef733b9b4457d
+size 53961
diff --git a/FateZero-main/data/negative_reg/car/682-7d849fbd641fe5bbb39a07ae290f055572ba79a6.jpg b/FateZero-main/data/negative_reg/car/682-7d849fbd641fe5bbb39a07ae290f055572ba79a6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..36fb4dc19bc6b13662f170e80242bf0c8cbc98fa
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/682-7d849fbd641fe5bbb39a07ae290f055572ba79a6.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5504db8659862ff6ec3ba89387e75c8b068f3e8c8db9a5fe4f3561dd431fc583
+size 75791
diff --git a/FateZero-main/data/negative_reg/car/83-c0d83d05870f16b380369272da855119b9a368ec.jpg b/FateZero-main/data/negative_reg/car/83-c0d83d05870f16b380369272da855119b9a368ec.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f5049771ecf7d9196dbed5e05575e165508f2e81
--- /dev/null
+++ b/FateZero-main/data/negative_reg/car/83-c0d83d05870f16b380369272da855119b9a368ec.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d19690854cebc236d6cb8697e881a86a83acb59eba17c7a9c37022b60ef662e
+size 54614
diff --git a/RAVE-main/CIVIT_AI/civit_ai.sh b/RAVE-main/CIVIT_AI/civit_ai.sh
new file mode 100644
index 0000000000000000000000000000000000000000..df5c04191c19ecf3b6ac0ef98b73ccec74a8787f
--- /dev/null
+++ b/RAVE-main/CIVIT_AI/civit_ai.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
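+# Usage: bash CIVIT_AI/civit_ai.sh <civitai_model_version_id>
+# Downloads the .safetensors checkpoint for the given CivitAI model version and
+# converts it into a diffusers folder under CIVIT_AI/diffusers_models/ via convert.py.
+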
+civit_ai=$1
+
+CWDPATH=$(pwd)
+mkdir -p "$CWDPATH/CIVIT_AI/safetensors"
+cd "$CWDPATH/CIVIT_AI/safetensors"
+
+mkdir -p "$civit_ai"
+cd "$civit_ai"
+wget "https://civitai.com/api/download/models/$civit_ai" --content-disposition
+
+# The download directory holds a single checkpoint file; its name (minus .safetensors) becomes the output folder name.
+model_name=$(ls | head -n 1)
+model_name=${model_name//$'\n'/}
+model_name2=${model_name//.safetensors/}
+
+eval "$(conda shell.bash hook)"
+conda activate rave
+cd ../..
+python convert.py \
+    --checkpoint_path "$CWDPATH/CIVIT_AI/safetensors/$civit_ai/$model_name" \
+    --dump_path "$CWDPATH/CIVIT_AI/diffusers_models/$civit_ai/$model_name2" \
+    --from_safetensors
+
+rm -rf "$CWDPATH/CIVIT_AI/safetensors/"
+
+echo "Download is done! Check the diffusers_models folder. $model_name"
\ No newline at end of file
diff --git a/RAVE-main/CIVIT_AI/convert.py b/RAVE-main/CIVIT_AI/convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..e117afcdd4c7e892890fa7896b32e184265d76bf
--- /dev/null
+++ b/RAVE-main/CIVIT_AI/convert.py
@@ -0,0 +1,182 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Conversion script for the LDM checkpoints. """
+
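+# Example invocation (mirroring how CIVIT_AI/civit_ai.sh calls this script; paths are placeholders):
+#   python convert.py --checkpoint_path /path/to/model.safetensors \
+#       --dump_path /path/to/output_diffusers_dir --from_safetensors
+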
+import argparse
+import importlib
+
+import torch
+
+from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
+ )
+ # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
+ parser.add_argument(
+ "--original_config_file",
+ default=None,
+ type=str,
+ help="The YAML config file corresponding to the original architecture.",
+ )
+ parser.add_argument(
+ "--num_in_channels",
+ default=None,
+ type=int,
+ help="The number of input channels. If `None` number of input channels will be automatically inferred.",
+ )
+ parser.add_argument(
+ "--scheduler_type",
+ default="pndm",
+ type=str,
+ help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
+ )
+ parser.add_argument(
+ "--pipeline_type",
+ default=None,
+ type=str,
+ help=(
+ "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
+ ". If `None` pipeline will be automatically inferred."
+ ),
+ )
+ parser.add_argument(
+ "--image_size",
+ default=None,
+ type=int,
+ help=(
+ "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
+ " Base. Use 768 for Stable Diffusion v2."
+ ),
+ )
+ parser.add_argument(
+ "--prediction_type",
+ default=None,
+ type=str,
+ help=(
+ "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
+ " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
+ ),
+ )
+ parser.add_argument(
+ "--extract_ema",
+ action="store_true",
+ help=(
+ "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
+ " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
+ " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
+ ),
+ )
+ parser.add_argument(
+ "--upcast_attention",
+ action="store_true",
+ help=(
+ "Whether the attention computation should always be upcasted. This is necessary when running stable"
+ " diffusion 2.1."
+ ),
+ )
+ parser.add_argument(
+ "--from_safetensors",
+ action="store_true",
+ help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
+ )
+ parser.add_argument(
+ "--to_safetensors",
+ action="store_true",
+ help="Whether to store pipeline in safetensors format or not.",
+ )
+ parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
+ parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
+ parser.add_argument(
+ "--stable_unclip",
+ type=str,
+ default=None,
+ required=False,
+ help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
+ )
+ parser.add_argument(
+ "--stable_unclip_prior",
+ type=str,
+ default=None,
+ required=False,
+ help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
+ )
+ parser.add_argument(
+ "--clip_stats_path",
+ type=str,
+ help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
+ required=False,
+ )
+ parser.add_argument(
+ "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
+ )
+ parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
+ parser.add_argument(
+ "--vae_path",
+ type=str,
+ default=None,
+ required=False,
+ help="Set to a path, hub id to an already converted vae to not convert it again.",
+ )
+ parser.add_argument(
+ "--pipeline_class_name",
+ type=str,
+ default=None,
+ required=False,
+ help="Specify the pipeline class name",
+ )
+
+ args = parser.parse_args()
+
+ if args.pipeline_class_name is not None:
+ library = importlib.import_module("diffusers")
+ class_obj = getattr(library, args.pipeline_class_name)
+ pipeline_class = class_obj
+ else:
+ pipeline_class = None
+
+ pipe = download_from_original_stable_diffusion_ckpt(
+ checkpoint_path=args.checkpoint_path,
+ original_config_file=args.original_config_file,
+ # config_files=args.config_files,
+ image_size=args.image_size,
+ prediction_type=args.prediction_type,
+ model_type=args.pipeline_type,
+ extract_ema=args.extract_ema,
+ scheduler_type=args.scheduler_type,
+ num_in_channels=args.num_in_channels,
+ upcast_attention=args.upcast_attention,
+ from_safetensors=args.from_safetensors,
+ device=args.device,
+ stable_unclip=args.stable_unclip,
+ stable_unclip_prior=args.stable_unclip_prior,
+ clip_stats_path=args.clip_stats_path,
+ controlnet=args.controlnet,
+ vae_path=args.vae_path,
+ pipeline_class=pipeline_class,
+ )
+
+ if args.half:
+ pipe.to(torch_dtype=torch.float16)
+
+ if args.controlnet:
+ # only save the controlnet model
+ pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
+ else:
+ pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
diff --git a/RAVE-main/pretrained_models/.gitattributes b/RAVE-main/pretrained_models/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..c7d9f3332a950355d5a77d85000f05e6f45435ea
--- /dev/null
+++ b/RAVE-main/pretrained_models/.gitattributes
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/RAVE-main/scripts/run_experiment.py b/RAVE-main/scripts/run_experiment.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c74bae699956bd9fdb1d94ed74e93d1f4b5d5e3
--- /dev/null
+++ b/RAVE-main/scripts/run_experiment.py
@@ -0,0 +1,130 @@
+import torch
+import argparse
+import os
+import json
+import sys
+import datetime
+import imageio # Import imageio for MP4 saving
+sys.path.append(os.getcwd())
+from pipelines.sd_controlnet_rave import RAVE
+from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet
+import utils.constants as const
+import utils.video_grid_utils as vgu
+import warnings
+warnings.filterwarnings("ignore")
+import numpy as np
+
+def init_device():
+ """Initialize the device (CUDA if available, else CPU)."""
+ device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
+ device = torch.device(device_name)
+ return device
+
+def init_paths(input_ns, video_name, save_folder):
+ """Initialize paths for video processing based on video name and save folder."""
+ # Set save path directly to the video name (e.g., truck.mp4) under save_folder
+ save_dir = save_folder
+ os.makedirs(save_dir, exist_ok=True)
+ input_ns.save_path = os.path.join(save_dir, video_name) # Use video_name directly as filename
+
+ # Set video path using the fixed base path and video name
+ input_ns.video_path = f'/home/wangjuntong/video_editing_dataset/all_sourse/{video_name}'
+
+ # Set Hugging Face ControlNet path based on preprocess_name
+ if '-' in input_ns.preprocess_name:
+ input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[i] for i in input_ns.preprocess_name.split('-')]
+ else:
+ input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name]
+ input_ns.hf_path = "runwayml/stable-diffusion-v1-5"
+
+ # Set inverse and control paths (though not used for saving)
+ input_ns.inverse_path = f'{const.GENERATED_DATA_PATH}/inverses/{video_name}/{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'
+ input_ns.control_path = f'{const.GENERATED_DATA_PATH}/controls/{video_name}/{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'
+ os.makedirs(input_ns.control_path, exist_ok=True)
+ os.makedirs(input_ns.inverse_path, exist_ok=True)
+
+ return input_ns
+
+def run(input_ns, video_name, positive_prompts, save_folder):
+ """Run the video editing process with the given parameters."""
+ if 'model_id' not in input_ns.__dict__:
+ input_ns.model_id = "None"
+ device = init_device()
+ input_ns = init_paths(input_ns, video_name, save_folder)
+
+
+ print(f"Save path: {input_ns.save_path}")
+
+ # Prepare video frames as a grid
+ input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad)
+ input_ns.sample_size = len(input_ns.image_pil_list)
+ print(f'Frame count: {len(input_ns.image_pil_list)}')
+
+ # Choose the appropriate ControlNet class
+ controlnet_class = RAVE_MultiControlNet if '-' in str(input_ns.controlnet_conditioning_scale) else RAVE
+ CN = controlnet_class(device)
+
+ # Initialize models
+ CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id)
+
+ input_dict = vars(input_ns)
+
+ # Run the editing process
+ start_time = datetime.datetime.now()
+ if '-' in str(input_ns.controlnet_conditioning_scale):
+ res_vid, control_vid_1, control_vid_2 = CN(input_dict)
+ else:
+ res_vid, control_vid = CN(input_dict)
+ end_time = datetime.datetime.now()
+
+ # Convert PIL images to numpy arrays for imageio
+ res_vid_np = [np.array(img) for img in res_vid]
+
+ # Save the result video as MP4
+ imageio.mimwrite(input_ns.save_path, res_vid_np, format='mp4', fps=30, quality=8)
+
+if __name__ == '__main__':
+ # Parse command-line argument for JSONL file path
+ parser = argparse.ArgumentParser(description='Batch video editing with JSONL input.')
+ parser.add_argument('--jsonl_path', type=str, required=True, help='Path to the JSONL file containing video info')
+ args = parser.parse_args()
+
+ # Fixed parameters
+ fixed_params = {
+ 'preprocess_name': 'depth_zoe',
+ 'batch_size': 4,
+ 'batch_size_vae': 1,
+ 'cond_step_start': 0.0,
+ 'controlnet_conditioning_scale': 1.0,
+ 'controlnet_guidance_end': 1.0,
+ 'controlnet_guidance_start': 0.0,
+ 'give_control_inversion': True,
+ 'grid_size': 3,
+ 'sample_size': -1,
+ 'pad': 1,
+ 'guidance_scale': 7.5,
+ 'inversion_prompt': '',
+ 'is_ddim_inversion': True,
+ 'is_shuffle': True,
+ 'negative_prompts': '',
+ 'num_inference_steps': 50,
+ 'num_inversion_step': 50,
+ 'seed': 0,
+ 'model_id': 'None'
+ }
+
+ # Read and process each line in the JSONL file
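+    # Each line is expected to carry at least the "video" and "edit_prompt" keys used below,
+    # e.g. (illustrative values): {"video": "truck.mp4", "edit_prompt": "a red truck is moving on the snow"}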
+ with open(args.jsonl_path, 'r') as f:
+ for line in f:
+ data = json.loads(line)
+ video_name = data['video'] # Use video key directly as filename (e.g., "truck.mp4")
+ positive_prompts = data['edit_prompt']
+ save_folder = f'/home/wangjuntong/RAVE-main/outputs/lnk_painting/{video_name.rsplit(".", 1)[0]}' # Folder named after video without extension
+
+ # Create input namespace with fixed and dynamic parameters
+ input_ns = argparse.Namespace(**fixed_params)
+ input_ns.positive_prompts = positive_prompts
+ input_ns.video_name = video_name
+
+ # Run the editing process
+ run(input_ns, video_name, positive_prompts, save_folder)
\ No newline at end of file
diff --git a/vid2vid-zero-main/.gitignore b/vid2vid-zero-main/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8e71423bcdad682bd07a89bc841a7b7e5dd35a4c
--- /dev/null
+++ b/vid2vid-zero-main/.gitignore
@@ -0,0 +1,175 @@
+# custom dirs
+checkpoints/
+outputs/
+
+# Initially taken from Github's Python gitignore files
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# tests and logs
+tests/fixtures/cached_*_text.txt
+logs/
+lightning_logs/
+lang_code_data/
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# vscode
+.vs
+.vscode
+
+# Pycharm
+.idea
+
+# TF code
+tensorflow_code
+
+# Models
+proc_data
+
+# examples
+runs
+/runs_old
+/wandb
+/examples/runs
+/examples/**/*.args
+/examples/rag/sweep
+
+# data
+/data
+serialization_dir
+
+# emacs
+*.*~
+debug.env
+
+# vim
+.*.swp
+
+#ctags
+tags
+
+# pre-commit
+.pre-commit*
+
+# .lock
+*.lock
+
+# DS_Store (MacOS)
+.DS_Store
+# RL pipelines may produce mp4 outputs
+*.mp4
+
+# dependencies
+/transformers
diff --git a/vid2vid-zero-main/README.md b/vid2vid-zero-main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9c1fbb3a9506239c3ff1f66f45127e43b6bec987
--- /dev/null
+++ b/vid2vid-zero-main/README.md
@@ -0,0 +1,152 @@
+# vid2vid-zero for Zero-Shot Video Editing
+
+[Wen Wang](https://scholar.google.com/citations?user=1ks0R04AAAAJ&hl=zh-CN)<sup>1*</sup>, [Kangyang Xie](https://github.com/felix-ky)<sup>1*</sup>, [Zide Liu](https://github.com/zideliu)<sup>1*</sup>, [Hao Chen](https://scholar.google.com.au/citations?user=FaOqRpcAAAAJ&hl=en)<sup>1</sup>, [Yue Cao](http://yue-cao.me/)<sup>2</sup>, [Xinlong Wang](https://www.xloong.wang/)<sup>2</sup>, [Chunhua Shen](https://cshen.github.io/)<sup>1</sup>
+
+<sup>1</sup>[ZJU](https://www.zju.edu.cn/english/), <sup>2</sup>[BAAI](https://www.baai.ac.cn/english.html)
+
+[Online demo on Hugging Face Spaces](https://huggingface.co/spaces/BAAI/vid2vid-zero)
+
+We propose vid2vid-zero, a simple yet effective method for zero-shot video editing. Our vid2vid-zero leverages off-the-shelf image diffusion models, and doesn't require training on any video. At the core of our method is a null-text inversion module for text-to-video alignment, a cross-frame modeling module for temporal consistency, and a spatial regularization module for fidelity to the original video. Without any training, we leverage the dynamic nature of the attention mechanism to enable bi-directional temporal modeling at test time.
+Experiments and analyses show promising results in editing attributes, subjects, places, etc., in real-world videos.
+
+
+## Highlights
+
+- Video editing with off-the-shelf image diffusion models.
+
+- No training on any video.
+
+- Promising results in editing attributes, subjects, places, etc., in real-world videos.
+
+## News
+* [2023.4.12] Online Gradio Demo is available [here](https://huggingface.co/spaces/BAAI/vid2vid-zero).
+* [2023.4.11] Add Gradio Demo (runs locally).
+* [2023.4.9] Code released!
+
+## Installation
+### Requirements
+
+```shell
+pip install -r requirements.txt
+```
+Installing [xformers](https://github.com/facebookresearch/xformers) is highly recommended for improved efficiency and speed on GPUs.
+
+### Weights
+
+**[Stable Diffusion]** [Stable Diffusion](https://arxiv.org/abs/2112.10752) is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input. The pre-trained Stable Diffusion models can be downloaded from [🤗 Hugging Face](https://huggingface.co) (e.g., [Stable Diffusion v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4), [v2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1)). We use Stable Diffusion v1-4 by default.
+
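+For example, the default v1-4 weights can be cloned into `checkpoints/stable-diffusion-v1-4` (the `pretrained_model_path` used by most of the provided configs) with `git lfs`:
+
+```bash
+git lfs install
+git clone https://huggingface.co/CompVis/stable-diffusion-v1-4 checkpoints/stable-diffusion-v1-4
+```
+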
+## Zero-shot testing
+
+Simply run:
+
+```bash
+accelerate launch test_vid2vid_zero.py --config path/to/config
+```
+
+For example:
+```bash
+accelerate launch test_vid2vid_zero.py --config configs/car-moving.yaml
+```
+
+## Gradio Demo
+Launch the local demo built with [gradio](https://gradio.app/):
+```bash
+python app.py
+```
+
+Or you can use our online gradio demo [here](https://huggingface.co/spaces/BAAI/vid2vid-zero).
+
+Note that we disable Null-text Inversion and enable fp16 for faster demo response.
+
+## Examples
+
+
+| Input Video | Output Video | Input Video | Output Video |
+| --- | --- | --- | --- |
+| "A car is moving on the road" | "A Porsche car is moving on the desert" | "A car is moving on the road" | "A jeep car is moving on the snow" |
+| "A man is running" | "Stephen Curry is running in Time Square" | "A man is running" | "A man is running in New York City" |
+| "A child is riding a bike on the road" | "a child is riding a bike on the flooded road" | "A child is riding a bike on the road" | "a lego child is riding a bike on the road" |
+| "A car is moving on the road" | "A car is moving on the snow" | "A car is moving on the road" | "A jeep car is moving on the desert" |
+
+## Citation
+
+```
+@article{vid2vid-zero,
+ title={Zero-Shot Video Editing Using Off-The-Shelf Image Diffusion Models},
+ author={Wang, Wen and Xie, Kangyang and Liu, Zide and Chen, Hao and Cao, Yue and Wang, Xinlong and Shen, Chunhua},
+ journal={arXiv preprint arXiv:2303.17599},
+ year={2023}
+}
+```
+
+## Acknowledgement
+[Tune-A-Video](https://github.com/showlab/Tune-A-Video), [diffusers](https://github.com/huggingface/diffusers), [prompt-to-prompt](https://github.com/google/prompt-to-prompt).
+
+## Contact
+
+**We are hiring** at all levels at BAAI Vision Team, including full-time researchers, engineers and interns.
+If you are interested in working with us on **foundation model, visual perception and multimodal learning**, please contact [Xinlong Wang](https://www.xloong.wang/) (`wangxinlong@baai.ac.cn`) and [Yue Cao](http://yue-cao.me/) (`caoyue@baai.ac.cn`).
diff --git a/vid2vid-zero-main/app.py b/vid2vid-zero-main/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a9375276e6bbaab815e00a3ba0dc7ef36821b6
--- /dev/null
+++ b/vid2vid-zero-main/app.py
@@ -0,0 +1,66 @@
+# Most code is from https://huggingface.co/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI
+
+#!/usr/bin/env python
+
+from __future__ import annotations
+
+import os
+from subprocess import getoutput
+
+import gradio as gr
+import torch
+
+from gradio_demo.app_running import create_demo
+from gradio_demo.runner import Runner
+
+TITLE = '# [vid2vid-zero](https://github.com/baaivision/vid2vid-zero)'
+
+ORIGINAL_SPACE_ID = 'BAAI/vid2vid-zero'
+SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
+GPU_DATA = getoutput('nvidia-smi')
+
+if os.getenv('SYSTEM') == 'spaces' and SPACE_ID != ORIGINAL_SPACE_ID:
+ SETTINGS = f'Settings'
+else:
+ SETTINGS = 'Settings'
+
+CUDA_NOT_AVAILABLE_WARNING = f'''## Attention - Running on CPU.
+
+You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces.
+You can use "T4 small/medium" to run this demo.
+
+'''
+
+HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
+
+You can check and create your Hugging Face tokens at https://huggingface.co/settings/tokens.
+You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab.
+
+'''
+
+HF_TOKEN = os.getenv('HF_TOKEN')
+
+
+def show_warning(warning_text: str) -> gr.Blocks:
+ with gr.Blocks() as demo:
+ with gr.Box():
+ gr.Markdown(warning_text)
+ return demo
+
+
+pipe = None
+runner = Runner(HF_TOKEN)
+
+with gr.Blocks(css='gradio_demo/style.css') as demo:
+ if not torch.cuda.is_available():
+ show_warning(CUDA_NOT_AVAILABLE_WARNING)
+
+ gr.Markdown(TITLE)
+ with gr.Tabs():
+ with gr.TabItem('Zero-shot Testing'):
+ create_demo(runner, pipe)
+
+ if not HF_TOKEN:
+ show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
+
+demo.queue(max_size=1).launch(share=False)
diff --git a/vid2vid-zero-main/checkpoints/.gitattributes b/vid2vid-zero-main/checkpoints/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..213840dbe07c9bcea00eb7abe79ff645f1234ba6
--- /dev/null
+++ b/vid2vid-zero-main/checkpoints/.gitattributes
@@ -0,0 +1,32 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/vid2vid-zero-main/configs/Cartoon_kangaroos.yaml b/vid2vid-zero-main/configs/Cartoon_kangaroos.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..456ae4d2efba1da1cf2e01906f9b0de224db0072
--- /dev/null
+++ b/vid2vid-zero-main/configs/Cartoon_kangaroos.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints
+output_dir: "/home/wangjuntong/vid2vid-zero-main/outputs/cartoon_kangaroo/"
+input_data:
+ video_path: "/home/wangjuntong/vid2vid-zero-main/AI_video/A cartoon kangaroo disco dances.mp4"
+ prompt: A cartoon kangaroo disco dances.
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A cartoon robot disco dances.
+ - A man disco dances.
+ - A cartoon man disco dances.
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/black-swan.yaml b/vid2vid-zero-main/configs/black-swan.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91e58f76499ee1b89210b8e981118c94dbd26f48
--- /dev/null
+++ b/vid2vid-zero-main/configs/black-swan.yaml
@@ -0,0 +1,36 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/black-swan
+input_data:
+ video_path: data/black-swan.mp4
+  prompt: a black swan is swimming on the water
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 4
+validation_data:
+ prompts:
+ - a black swan is swimming on the water, Van Gogh style
+ - a white swan is swimming on the water
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/brown-bear.yaml b/vid2vid-zero-main/configs/brown-bear.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..278548c89c86e516b4a3d54ac13da6962a141896
--- /dev/null
+++ b/vid2vid-zero-main/configs/brown-bear.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/brown-bear
+input_data:
+ video_path: data/brown-bear.mp4
+ prompt: a brown bear is sitting on the ground
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - a brown bear is sitting on the grass
+ - a black bear is sitting on the grass
+ - a polar bear is sitting on the ground
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/car-moving.yaml b/vid2vid-zero-main/configs/car-moving.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..041ee6519696836b50422e8e0b45f1c3fa1a8646
--- /dev/null
+++ b/vid2vid-zero-main/configs/car-moving.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints
+output_dir: outputs/car-moving
+input_data:
+ video_path: data/car-moving.mp4
+ prompt: a car is moving on the road
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - a car is moving on the snow
+ - a jeep car is moving on the road
+ - a jeep car is moving on the desert
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/car-turn.yaml b/vid2vid-zero-main/configs/car-turn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1e2e2b811326f8c816695ec80337195fc20a41d
--- /dev/null
+++ b/vid2vid-zero-main/configs/car-turn.yaml
@@ -0,0 +1,39 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: "outputs/car-turn"
+
+input_data:
+ video_path: "data/car-turn.mp4"
+ prompt: "a jeep car is moving on the road"
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 6
+
+validation_data:
+ prompts:
+ - "a jeep car is moving on the beach"
+ - "a jeep car is moving on the snow"
+ - "a Porsche car is moving on the desert"
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/child-riding.yaml b/vid2vid-zero-main/configs/child-riding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd83efd52c8ead752506bbb66caf5b8fb4157aa8
--- /dev/null
+++ b/vid2vid-zero-main/configs/child-riding.yaml
@@ -0,0 +1,40 @@
+
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/child-riding
+
+input_data:
+ video_path: data/child-riding.mp4
+ prompt: "a child is riding a bike on the road"
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+
+validation_data:
+ # inv_latent: "outputs_2d/car-turn/inv_latents/ddim_latent-0.pt" # latent inversed w/o SCAttn !
+ prompts:
+ - a lego child is riding a bike on the road
+ - a child is riding a bike on the flooded road
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/cow-walking.yaml b/vid2vid-zero-main/configs/cow-walking.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4dc4d7f5244f2bc0f01997e03ecd818a8c16b824
--- /dev/null
+++ b/vid2vid-zero-main/configs/cow-walking.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/cow-walking
+input_data:
+ video_path: data/cow-walking.mp4
+ prompt: a cow is walking on the grass
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 2
+validation_data:
+ prompts:
+ - a lion is walking on the grass
+ - a dog is walking on the grass
+ - a cow is walking on the snow
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/dog-walking.yaml b/vid2vid-zero-main/configs/dog-walking.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6239909b2ab16304cba3df2337d1f3dfdf6b6e2d
--- /dev/null
+++ b/vid2vid-zero-main/configs/dog-walking.yaml
@@ -0,0 +1,35 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/dog_walking
+input_data:
+ video_path: data/dog-walking.mp4
+ prompt: a dog is walking on the ground
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 15
+ sample_frame_rate: 3
+validation_data:
+ prompts:
+ - a dog is walking on the ground, Van Gogh style
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/horse-running.yaml b/vid2vid-zero-main/configs/horse-running.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bdf28c851896b56e8d9d4477f942e2e0258eb463
--- /dev/null
+++ b/vid2vid-zero-main/configs/horse-running.yaml
@@ -0,0 +1,36 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/horse-running
+input_data:
+ video_path: data/horse-running.mp4
+ prompt: a horse is running on the beach
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 2
+validation_data:
+ prompts:
+ - a dog is running on the beach
+ - a dog is running on the desert
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/lion-roaring.yaml b/vid2vid-zero-main/configs/lion-roaring.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a25f368e88acbdc8a11b9874005bd0e431b1ed3a
--- /dev/null
+++ b/vid2vid-zero-main/configs/lion-roaring.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: ./outputs/lion-roaring
+input_data:
+ video_path: data/lion-roaring.mp4
+ prompt: a lion is roaring
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 2
+validation_data:
+ prompts:
+ - a lego lion is roaring
+ - a wolf is roaring, anime style
+ - a lion is roaring, anime style
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/man-running.yaml b/vid2vid-zero-main/configs/man-running.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2132ea4ca91e9e4e38eea55e230b534026848778
--- /dev/null
+++ b/vid2vid-zero-main/configs/man-running.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/man-running
+input_data:
+ video_path: data/man-running.mp4
+ prompt: a man is running
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 25
+ sample_frame_rate: 2
+validation_data:
+ prompts:
+ - Stephen Curry is running in Time Square
+ - a man is running, Van Gogh style
+ - a man is running in New York City
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/man-surfing.yaml b/vid2vid-zero-main/configs/man-surfing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fa8b43b695373ef57b2fbba33d70ce149ab22a67
--- /dev/null
+++ b/vid2vid-zero-main/configs/man-surfing.yaml
@@ -0,0 +1,36 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/man-surfing
+input_data:
+ video_path: data/man-surfing.mp4
+ prompt: a man is surfing
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 3
+validation_data:
+ prompts:
+ - a boy is surfing in the desert
+    - Iron Man is surfing
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/plane.yaml b/vid2vid-zero-main/configs/plane.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..97af1b7f98be723028378401ab22311970e8eb94
--- /dev/null
+++ b/vid2vid-zero-main/configs/plane.yaml
@@ -0,0 +1,37 @@
+pretrained_model_path: checkpoints
+output_dir: "/home/wangjuntong/vid2vid-zero-main/outputs/aircraft-landing/"
+input_data:
+ video_path: "/home/wangjuntong/video_editing_dataset/real_video/aircraft-landing.mp4"
+ prompt: "A plane is landing."
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A helicopter is landing on a helipad.
+ - A small private plane is landing at a rural airfield.
+    - A black plane is landing at a busy airport.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/rabbit-watermelon.yaml b/vid2vid-zero-main/configs/rabbit-watermelon.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..137ed31ca7284b2d8c143fbf7197074294150e19
--- /dev/null
+++ b/vid2vid-zero-main/configs/rabbit-watermelon.yaml
@@ -0,0 +1,40 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: "outputs/rabbit-watermelon"
+
+input_data:
+ video_path: "data/rabbit-watermelon.mp4"
+ prompt: "a rabbit is eating a watermelon"
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 6
+
+validation_data:
+ prompts:
+ - "a tiger is eating a watermelon"
+ - "a rabbit is eating an orange"
+ - "a rabbit is eating a pizza"
+ - "a puppy is eating an orange"
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/skateboard-dog.yaml b/vid2vid-zero-main/configs/skateboard-dog.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a93935f5cc4599a8749d4e985145478abfc0a09e
--- /dev/null
+++ b/vid2vid-zero-main/configs/skateboard-dog.yaml
@@ -0,0 +1,35 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/skateboard-dog
+input_data:
+ video_path: data/skateboard-dog.avi
+ prompt: A man with a dog skateboarding on the road
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 3
+validation_data:
+ prompts:
+ - A man with a dog skateboarding on the desert
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/skateboard-man.yaml b/vid2vid-zero-main/configs/skateboard-man.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a08da6050282297bcd15e60d2b21212a3cd032f
--- /dev/null
+++ b/vid2vid-zero-main/configs/skateboard-man.yaml
@@ -0,0 +1,35 @@
+pretrained_model_path: checkpoints/stable-diffusion-v1-4
+output_dir: outputs/skateboard-man
+input_data:
+ video_path: data/skateboard-man.mp4
+ prompt: a man is playing skateboard on the ground
+ n_sample_frames: 8
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 3
+validation_data:
+ prompts:
+ - a boy is playing skateboard on the ground
+ video_length: 8
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/configs/wolf.yaml b/vid2vid-zero-main/configs/wolf.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a957ab59a40a04801c9fb63dc278870e2850f084
--- /dev/null
+++ b/vid2vid-zero-main/configs/wolf.yaml
@@ -0,0 +1,39 @@
+pretrained_model_path: "checkpoints"
+output_dir: "/home/wangjuntong/vid2vid-zero-main/outputs/"
+source_video_base: "/home/wangjuntong/video_editing_dataset/all_sourse/"
+gradient_accumulation_steps: 1
+input_data:
+ video_path: "/home/wangjuntong/vid2vid-zero-main/AI_video/wolf_monn.mp4"
+ prompt: At night, a wolf was walking under the moon, and its companion soon came along.
+ n_sample_frames: 16
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - At night, a tiger was walking under the moonlight, and its companion also rushed over.
+ - At dawn, a deer was strolling in the meadow, and its partner soon showed up.
+ - In the evening, a fox was trotting beneath the starlight, and its fellow quickly joined it.
+ video_length: 16
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ # args for null-text inv
+ use_null_inv: True
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: True
+
+input_batch_size: 1
+seed: 33
+mixed_precision: "no"
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
+# test-time adaptation
+use_sc_attn: True
+use_st_attn: True
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/requirements.txt b/vid2vid-zero-main/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..953d10e35a623a0b2be1c9d63f4d6a2b95c5f956
--- /dev/null
+++ b/vid2vid-zero-main/requirements.txt
@@ -0,0 +1,14 @@
+torch==1.12.1
+torchvision==0.13.1
+diffusers[torch]==0.11.1
+transformers>=4.25.1
+bitsandbytes==0.35.4
+decord==0.6.0
+accelerate
+tensorboard
+modelcards
+omegaconf
+einops
+imageio
+ftfy
+huggingface_hub<0.26.0
\ No newline at end of file
diff --git a/vid2vid-zero-main/test_vid2vid_zero.py b/vid2vid-zero-main/test_vid2vid_zero.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ddbb85da745d43b663e7b39684393b34a9d7b49
--- /dev/null
+++ b/vid2vid-zero-main/test_vid2vid_zero.py
@@ -0,0 +1,396 @@
+import argparse
+import datetime
+import logging
+import inspect
+import math
+import os
+import warnings
+import json
+from typing import Dict, Optional, Tuple, List
+from omegaconf import OmegaConf
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+
+import diffusers
+import transformers
+from accelerate import Accelerator
+from accelerate.logging import get_logger
+from accelerate.utils import set_seed
+from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
+from diffusers.optimization import get_scheduler
+from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
+from tqdm.auto import tqdm
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from vid2vid_zero.models.unet_2d_condition import UNet2DConditionModel
+from vid2vid_zero.data.dataset import VideoDataset
+from vid2vid_zero.pipelines.pipeline_vid2vid_zero import Vid2VidZeroPipeline
+from vid2vid_zero.util import save_videos_grid, save_videos_as_images, ddim_inversion
+from einops import rearrange
+
+from vid2vid_zero.p2p.p2p_stable import AttentionReplace, AttentionRefine
+from vid2vid_zero.p2p.ptp_utils import register_attention_control
+from vid2vid_zero.p2p.null_text_w_ptp import NullInversion
+
+# Version check
+check_min_version("0.10.0.dev0")
+
+logger = get_logger(__name__, log_level="INFO")
+
+def prepare_control(unet, prompts: List[str], validation_data: Dict):
+ """Prepare attention controller for video editing"""
+ assert len(prompts) == 2, "Requires exactly two prompts for editing"
+
+ # Calculate word counts for both prompts
+ length1 = len(prompts[0].split())
+ length2 = len(prompts[1].split())
+
+ # Configure controller based on prompt similarity
+ if length1 == length2:
+ controller = AttentionReplace(
+ prompts,
+ validation_data['num_inference_steps'],
+ cross_replace_steps=0.8,
+ self_replace_steps=0.4
+ )
+ else:
+ controller = AttentionRefine(
+ prompts,
+ validation_data['num_inference_steps'],
+ cross_replace_steps=0.8,
+ self_replace_steps=0.4
+ )
+
+ # Register attention control with UNet
+ register_attention_control(unet, controller)
+ return controller
+
+def process_single_video(
+ pretrained_model_path: str,
+ base_output_dir: str,
+ video_relative_path: str,
+ source_prompt: str,
+ edit_prompt: str,
+ source_video_base: str,
+ # Video parameters
+ width: int = 512,
+ height: int = 512,
+ n_sample_frames: int = 16,
+ # Model parameters
+ input_batch_size: int = 1,
+ gradient_accumulation_steps: int = 1,
+ gradient_checkpointing: bool = True,
+ mixed_precision: Optional[str] = "fp16",
+ enable_xformers_memory_efficient_attention: bool = True,
+ seed: Optional[int] = None,
+ use_sc_attn: bool = True,
+ use_st_attn: bool = True,
+ st_attn_idx: int = 0,
+ fps: int = 8,
+ # Inversion parameters
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ num_inv_steps: int = 50,
+ use_null_inv: bool = True,
+ null_inner_steps: int = 1,
+ null_base_lr: float = 1e-2,
+ null_normal_infer: bool = True
+):
+ """Process a single video editing task"""
+ # Generate output directory path
+ video_name = os.path.splitext(video_relative_path)[0]
+ output_dir = os.path.join(base_output_dir, video_name)
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Build input data configuration
+ input_data = {
+ "video_path": os.path.join(source_video_base, video_relative_path),
+ "prompt": source_prompt,
+ "n_sample_frames": n_sample_frames,
+ "width": width,
+ "height": height,
+ "sample_start_idx": 0,
+ "sample_frame_rate": 1
+ }
+
+ # Build validation data configuration
+ validation_data = {
+ "prompts": [edit_prompt],
+ "video_length": n_sample_frames,
+ "width": width,
+ "height": height,
+ "num_inference_steps": num_inference_steps,
+ "guidance_scale": guidance_scale,
+ "num_inv_steps": num_inv_steps,
+ "use_null_inv": use_null_inv,
+ "null_inner_steps": null_inner_steps,
+ "null_base_lr": null_base_lr,
+ "null_normal_infer": null_normal_infer
+ }
+
+ # Call main processing function with configured parameters
+ main(
+ video_name=video_name,
+ pretrained_model_path=pretrained_model_path,
+ output_dir=output_dir,
+ input_data=input_data,
+ validation_data=validation_data,
+ input_batch_size=input_batch_size,
+ gradient_accumulation_steps=gradient_accumulation_steps,
+ gradient_checkpointing=gradient_checkpointing,
+ mixed_precision=mixed_precision,
+ enable_xformers_memory_efficient_attention=enable_xformers_memory_efficient_attention,
+ seed=seed,
+ use_sc_attn=use_sc_attn,
+ use_st_attn=use_st_attn,
+ st_attn_idx=st_attn_idx,
+ fps=fps
+ )
+
+def main(
+ video_name: str,
+ pretrained_model_path: str,
+ output_dir: str,
+ input_data: Dict,
+ validation_data: Dict,
+ input_batch_size: int = 1,
+ gradient_accumulation_steps: int = 1,
+ gradient_checkpointing: bool = True,
+ mixed_precision: Optional[str] = "fp16",
+ enable_xformers_memory_efficient_attention: bool = True,
+ seed: Optional[int] = None,
+ use_sc_attn: bool = True,
+ use_st_attn: bool = True,
+ st_attn_idx: int = 0,
+ fps: int = 8
+):
+ """Main video processing pipeline"""
+ # Initialize accelerator
+ accelerator = Accelerator(
+ gradient_accumulation_steps=gradient_accumulation_steps,
+ mixed_precision=mixed_precision,
+ )
+
+ # Configure logging
+ logging.basicConfig(
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ datefmt="%m/%d/%Y %H:%M:%S",
+ level=logging.INFO,
+ )
+ logger.info(accelerator.state, main_process_only=False)
+
+ # Set seed if specified
+ if seed is not None:
+ set_seed(seed)
+
+ # Initialize models
+ tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
+ text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
+ vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
+ unet = UNet2DConditionModel.from_pretrained(
+ pretrained_model_path,
+ subfolder="unet",
+ use_sc_attn=use_sc_attn,
+ use_st_attn=use_st_attn,
+ st_attn_idx=st_attn_idx
+ )
+
+ # Freeze models
+ vae.requires_grad_(False)
+ text_encoder.requires_grad_(False)
+ unet.requires_grad_(False)
+
+ # Enable xformers if available
+ if enable_xformers_memory_efficient_attention and is_xformers_available():
+ unet.enable_xformers_memory_efficient_attention()
+
+ # Configure gradient checkpointing
+ if gradient_checkpointing:
+ unet.enable_gradient_checkpointing()
+
+ # Prepare dataset
+ input_dataset = VideoDataset(**input_data)
+ input_dataset.prompt_ids = tokenizer(
+ input_dataset.prompt,
+ max_length=tokenizer.model_max_length,
+ padding="max_length",
+ truncation=True,
+ return_tensors="pt"
+ ).input_ids[0]
+
+ # Prepare data loader
+ input_dataloader = torch.utils.data.DataLoader(
+ input_dataset, batch_size=input_batch_size
+ )
+
+ # Initialize pipeline
+ validation_pipeline = Vid2VidZeroPipeline(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler"),
+ safety_checker=None,
+ feature_extractor=None,
+ )
+ validation_pipeline.enable_vae_slicing()
+
+ # Prepare for inversion
+ ddim_inv_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder='scheduler')
+ ddim_inv_scheduler.set_timesteps(validation_data['num_inv_steps'])
+
+ # Prepare accelerator
+ unet, input_dataloader = accelerator.prepare(unet, input_dataloader)
+
+ # Main processing loop
+ weight_dtype = torch.float32
+ if accelerator.mixed_precision == "fp16":
+ weight_dtype = torch.float16
+ elif accelerator.mixed_precision == "bf16":
+ weight_dtype = torch.bfloat16
+
+ # Move models to device
+ text_encoder.to(accelerator.device, dtype=weight_dtype)
+ vae.to(accelerator.device, dtype=weight_dtype)
+
+ # Processing loop
+ # Invert each input batch, then generate edited videos for every target prompt
+ unet.eval()
+ for step, batch in enumerate(input_dataloader):
+ samples = []
+
+ # load the source frames for this batch
+ pixel_values = batch["pixel_values"].to(weight_dtype)
+
+ # keep a normalized copy of the input video for visualization
+ video = (pixel_values / 2 + 0.5).clamp(0, 1).detach().cpu()
+ video = video.permute(0, 2, 1, 3, 4) # (b, c, f, h, w)
+ samples.append(video)
+
+ # encode frames into latents with the frozen VAE
+ video_length = pixel_values.shape[1]
+ pixel_values = rearrange(pixel_values, "b f c h w -> (b f) c h w")
+ latents = vae.encode(pixel_values).latent_dist.sample()
+ latents = rearrange(latents, "(b f) c h w -> b c f h w", f=video_length)
+ latents = latents * 0.18215
+
+ # seeded generator for reproducible sampling
+ generator = torch.Generator(device="cuda")
+ if seed is not None:
+ generator.manual_seed(seed)
+
+ # invert the source latents back to noise
+ ddim_inv_latent = None
+ if validation_data.get('use_null_inv', False):
+ # null-text inversion: optimize per-timestep unconditional embeddings
+ null_inversion = NullInversion(
+ model=validation_pipeline,
+ guidance_scale=validation_data['guidance_scale'],
+ null_inv_with_prompt=False,
+ null_normal_infer=validation_data.get('null_normal_infer', True)
+ )
+ ddim_inv_latent, uncond_embeddings = null_inversion.invert(
+ latents,
+ input_dataset.prompt,
+ verbose=True,
+ null_inner_steps=validation_data.get('null_inner_steps', 1),
+ null_base_lr=validation_data.get('null_base_lr', 1e-2)
+ )
+ ddim_inv_latent = ddim_inv_latent.to(weight_dtype)
+ uncond_embeddings = [embed.to(weight_dtype) for embed in uncond_embeddings]
+ else:
+ # plain DDIM inversion with an empty prompt
+ ddim_inv_latent = ddim_inversion(
+ validation_pipeline,
+ ddim_inv_scheduler,
+ video_latent=latents,
+ num_inv_steps=validation_data['num_inv_steps'],
+ prompt="",
+ normal_infer=True
+ )[-1].to(weight_dtype)
+ uncond_embeddings = None
+
+ # duplicate latents for the (source, edited) prompt pair
+ ddim_inv_latent = ddim_inv_latent.repeat(2, 1, 1, 1, 1)
+
+ # run attention-controlled editing for each target prompt
+ for idx, prompt in enumerate(validation_data['prompts']):
+ prompts = [input_dataset.prompt, prompt]
+ prepare_control(unet=unet, prompts=prompts, validation_data=validation_data)
+
+ sample = validation_pipeline(
+ prompts,
+ generator=generator,
+ latents=ddim_inv_latent,
+ uncond_embeddings=uncond_embeddings,
+ **validation_data
+ ).images
+
+ # split reconstruction and edited generation
+ if sample.shape[0] == 2:
+ sample_inv, sample_gen = sample.chunk(2)
+ save_videos_grid(sample_gen, f"{output_dir}/sample/{video_name}.gif", fps=fps)
+ samples.append(sample_gen)
+
+ # save the input video together with all edited results
+ if samples:
+ samples = torch.cat(samples)
+ save_path = f"{output_dir}/sample-all.gif"
+ save_videos_grid(samples, save_path, fps=fps)
+ save_videos_grid(samples, save_path.replace(".gif", ".mp4"), fps=fps)
+ logger.info(f"Saved samples to {save_path}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--config", type=str, required=True, help="Base configuration file")
+ parser.add_argument("--jsonl", type=str, required=True, help="Path to JSONL input file")
+ args = parser.parse_args()
+
+ # Load base configuration
+ base_config = OmegaConf.load(args.config)
+
+ # Process each entry in JSONL file
+ with open(args.jsonl, "r") as f:
+ for line in f:
+ entry = json.loads(line)
+
+ try:
+ # Process single video task
+ process_single_video(
+ pretrained_model_path=base_config.pretrained_model_path,
+ base_output_dir=base_config.output_dir,
+ video_relative_path=entry["video"],
+ source_prompt=entry["prompt"],
+ edit_prompt=entry["edit_prompt"],
+ source_video_base=base_config.source_video_base,
+ # Video parameters
+ width=base_config.input_data.width,
+ height=base_config.input_data.height,
+ n_sample_frames=base_config.input_data.n_sample_frames,
+ # Model parameters
+ input_batch_size=base_config.input_batch_size,
+ gradient_accumulation_steps=base_config.gradient_accumulation_steps,
+ gradient_checkpointing=base_config.gradient_checkpointing,
+ mixed_precision=base_config.mixed_precision,
+ enable_xformers_memory_efficient_attention=base_config.enable_xformers_memory_efficient_attention,
+ seed=base_config.seed,
+ use_sc_attn=base_config.use_sc_attn,
+ use_st_attn=base_config.use_st_attn,
+ st_attn_idx=base_config.st_attn_idx,
+ fps=base_config.get("fps", 8),
+ # Inversion parameters
+ num_inference_steps=base_config.validation_data.num_inference_steps,
+ guidance_scale=base_config.validation_data.guidance_scale,
+ num_inv_steps=base_config.validation_data.num_inv_steps,
+ use_null_inv=base_config.validation_data.use_null_inv,
+ null_inner_steps=base_config.validation_data.null_inner_steps,
+ null_base_lr=base_config.validation_data.null_base_lr,
+ null_normal_infer=base_config.validation_data.null_normal_infer
+ )
+ except Exception as e:
+ logger.error(f"Failed to process entry {entry}: {str(e)}")
+ continue
\ No newline at end of file
diff --git a/vid2vid-zero-main/tokenlong.py b/vid2vid-zero-main/tokenlong.py
new file mode 100644
index 0000000000000000000000000000000000000000..81fbb27c75087bbbf22b2ef6f10cd9c6148c3c70
--- /dev/null
+++ b/vid2vid-zero-main/tokenlong.py
@@ -0,0 +1,10 @@
+from transformers import CLIPTokenizer
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
+prompt = "The waves crash against the rugged cliffs of Cape Cergarre Beach in the view of a drone. The lapping blue waters form white-tipped waves, while the golden glow of the setting sun illuminates the rocky shores. In the distance there is a small island with a lighthouse, and green bushes cover the edge of the cliff. The steep descent from the road to the beach is a spectacular feat, with the edge of the cliff jutting out into the sea. It's a view that captures the pristine beauty of the coast and the rugged landscape of the Pacific Coast Highway."
+# tokenize without truncation so the true prompt length is reported
+tokens = tokenizer(
+ prompt,
+ return_tensors="pt"
+)
+# CLIP's text encoder limit is tokenizer.model_max_length (77 tokens)
+print(tokens.input_ids.shape[1])
\ No newline at end of file
diff --git a/vid2vid-zero-main/vid2vid_real_config/airbrush-painting.yaml b/vid2vid-zero-main/vid2vid_real_config/airbrush-painting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..682a0035e92da3baa745189614f54eef5c3a18f7
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/airbrush-painting.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/airbrush-painting/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/airbrush-painting.mp4
+ prompt: 'A man is painting a picture related to horses. '
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman is painting a picture related to cats.
+ - A child is drawing a picture about birds in a park.
+ - An artist is creating a painting related to flowers in a studio.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/aircraft-landing.yaml b/vid2vid-zero-main/vid2vid_real_config/aircraft-landing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3df753355fc52e1914148e641adee0ee332460d0
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/aircraft-landing.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/aircraft-landing/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/aircraft-landing.mp4
+ prompt: A plane is landing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A helicopter is landing on a helipad.
+ - A small private plane is landing at a rural airfield.
+ - A cargo plane is landing at a busy airport.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/american-flag-in-wind.yaml b/vid2vid-zero-main/vid2vid_real_config/american-flag-in-wind.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..918f7e42a3529017455f0f7b949c8c7f3ff045f7
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/american-flag-in-wind.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/american-flag-in-wind/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/american-flag-in-wind.mp4
+ prompt: The American flag is fluttering in the wind.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The British flag is fluttering in the wind outside a historical building.
+ - A rainbow-colored flag is fluttering in the wind at a festival.
+ - The Japanese flag is fluttering in the wind near a traditional temple.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/audi-snow-trail.yaml b/vid2vid-zero-main/vid2vid_real_config/audi-snow-trail.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b3a485215b8b94fc23b31108c20bbda968aa3ad
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/audi-snow-trail.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/audi-snow-trail/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/audi-snow-trail.mp4
+ prompt: 'Audi is driving on the forest path in winter. '
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - BMW is driving on the mountain road in autumn.
+ - A Jeep is driving on the beach in summer.
+ - A Tesla is driving on the city street in spring.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/basketball-players-scoring.yaml b/vid2vid-zero-main/vid2vid_real_config/basketball-players-scoring.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..25a2ed78b74d75624b892c0e0e7446f8324dd0b0
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/basketball-players-scoring.yaml
@@ -0,0 +1,38 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/basketball-players-scoring/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/basketball-players-scoring.mp4
+ prompt: 'A group of people are playing basketball, and one of them makes a shot
+ and scores. '
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A team of people are playing basketball, and their star player makes a
+ three-point shot and scores.
+ - A group of friends are playing basketball, and one of them makes a difficult layup
+ and scores.
+ - A group of students are playing basketball, and the point guard makes a shot from
+ the perimeter and scores.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/basketball-shot.yaml b/vid2vid-zero-main/vid2vid_real_config/basketball-shot.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..35b44e875388eb7a7825fa7ddf6d0834466aead2
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/basketball-shot.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/basketball-shot/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/basketball-shot.mp4
+ prompt: A person is practicing shooting.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A basketball player is practicing free-throw shooting on a court.
+ - A soccer player is practicing penalty kick shooting on a soccer field.
+ - An archer is practicing arrow shooting at a target range.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/bmx-rider.yaml b/vid2vid-zero-main/vid2vid_real_config/bmx-rider.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d61851545c34bcca78893c2dcfb50ec589edc629
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/bmx-rider.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/bmx-rider/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/bmx-rider.mp4
+ prompt: A man demonstrates bicycle acrobatics.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman demonstrates skateboard acrobatics in a skate park.
+ - A child demonstrates scooter tricks in a playground.
+ - A teenager demonstrates unicycle acrobatics in a circus-like setting.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/boxer-punching-towards-camera.yaml b/vid2vid-zero-main/vid2vid_real_config/boxer-punching-towards-camera.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ad2ad821b8a5588c42b4c985e636c6f0a5eed1a
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/boxer-punching-towards-camera.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/boxer-punching-towards-camera/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/boxer-punching-towards-camera.mp4
+ prompt: A man practicing boxing, facing the camera.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman practicing karate, facing the camera.
+ - A young boxer practicing jabs and crosses, facing away from the camera.
+ - A martial artist practicing Taekwondo, side-on to the camera.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/cat-dog-play.yaml b/vid2vid-zero-main/vid2vid_real_config/cat-dog-play.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b02ebefe4d76f36227c50759a29f0f7f4ac8119e
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/cat-dog-play.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/cat-dog-play/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/cat-dog-play.mp4
+ prompt: A dog and a cat are playing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A dog and a rabbit are playing in a grassy field.
+ - Two cats are playing with a ball of yarn.
+ - A puppy and a kitten are playing around a tree.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/cat-in-the-sun.yaml b/vid2vid-zero-main/vid2vid_real_config/cat-in-the-sun.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f460b2f2043da4bf268b454a9e38c767b8fbe217
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/cat-in-the-sun.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/cat-in-the-sun/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/cat-in-the-sun.mp4
+ prompt: A cat in a sunny field.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A dog in a shaded backyard.
+ - A rabbit in a carrot-filled garden.
+ - A chicken in a barnyard under the sun.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/cows-grazing.yaml b/vid2vid-zero-main/vid2vid_real_config/cows-grazing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..90216038d11099297d610ee0a5f12293b3744a21
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/cows-grazing.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/cows-grazing/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/cows-grazing.mp4
+ prompt: Cows are grazing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Sheep are grazing in a meadow.
+ - Horses are grazing in a pasture.
+ - Goats are grazing on a hillside.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/deer-eating-leaves.yaml b/vid2vid-zero-main/vid2vid_real_config/deer-eating-leaves.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..918e11101db88567845b0444e90f3a88b8be66e6
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/deer-eating-leaves.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/deer-eating-leaves/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/deer-eating-leaves.mp4
+ prompt: The deer is eating the leaves.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The goat is eating the grass.
+ - The rabbit is eating the carrots.
+ - The giraffe is eating the leaves from a tall tree.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/dirt-road-driving.yaml b/vid2vid-zero-main/vid2vid_real_config/dirt-road-driving.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a74fe16aecece2e8104b4f4e16305b5ae19931fc
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/dirt-road-driving.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/dirt-road-driving/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/dirt-road-driving.mp4
+ prompt: A car is driving on a dirt road.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A motorcycle is driving on a gravel path.
+ - A truck is driving on a muddy road.
+ - A bicycle is riding on a forest trail.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/dog-competition.yaml b/vid2vid-zero-main/vid2vid_real_config/dog-competition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf9477f07ebb8362baa32230a274f701ccc5c72e
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/dog-competition.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/dog-competition/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/dog-competition.mp4
+ prompt: The dog is doing an obstacle course race.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The cat is doing a small-scale agility course.
+ - The rabbit is running through a maze-like obstacle course.
+ - The horse is jumping over obstacles in an equestrian event.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/drift-turn.yaml b/vid2vid-zero-main/vid2vid_real_config/drift-turn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbe9e4176a2b8626c28ec4170c7b4ff110582112
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/drift-turn.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/drift-turn/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/drift-turn.mp4
+ prompt: The racing car is taking a turn in the curve.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The sports car is taking a sharp turn on a mountain pass.
+ - The rally car is sliding through a turn on a dirt track.
+ - The Formula One car is accelerating out of a turn on a race circuit.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/earth-full-view.yaml b/vid2vid-zero-main/vid2vid_real_config/earth-full-view.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6026460ff16944777b64515f608345abf628e6a
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/earth-full-view.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/earth-full-view/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/earth-full-view.mp4
+ prompt: The complete picture of the Earth.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The complete picture of the Moon.
+ - The detailed view of Mars.
+ - The panoramic view of Jupiter's surface.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/eating-pizza.yaml b/vid2vid-zero-main/vid2vid_real_config/eating-pizza.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2f4332e10eb4e1c023f2d2f25e9fdb2a73202fb7
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/eating-pizza.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/eating-pizza/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/eating-pizza.mp4
+ prompt: A woman is eating pizza.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A man is eating a hamburger.
+ - A child is eating an ice-cream cone.
+ - A couple is sharing a plate of pasta.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/geometric-video-background.yaml b/vid2vid-zero-main/vid2vid_real_config/geometric-video-background.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..46a0307c9cc15849afaca7910c9a93daf34ea114
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/geometric-video-background.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/geometric-video-background/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/geometric-video-background.mp4
+ prompt: Geometric video background
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Floral video background.
+ - Abstract video background.
+ - Animal-themed video background.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/gold-fish.yaml b/vid2vid-zero-main/vid2vid_real_config/gold-fish.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..96da834431400d82c48668e5493025ec87cd4fe1
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/gold-fish.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/gold-fish/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/gold-fish.mp4
+ prompt: Goldfish swim in the water.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Tropical fish swim in an aquarium.
+ - Dolphins swim in the ocean.
+ - Turtles swim in a pond.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/hurdles-race.yaml b/vid2vid-zero-main/vid2vid_real_config/hurdles-race.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..553660ce6acb63b597efe1df5f8084f5babb09ae
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/hurdles-race.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/hurdles-race/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/hurdles-race.mp4
+ prompt: Women's hurdles competition.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Men's hurdles competition.
+ - Children's hurdles race in a school sports event.
+ - Senior citizens' hurdles challenge in a community sports day.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/ice-hockey.yaml b/vid2vid-zero-main/vid2vid_real_config/ice-hockey.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2eeb0b484efa2b7140bb77f5c5a58e55db9c523c
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/ice-hockey.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/ice-hockey/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/ice-hockey.mp4
+ prompt: Men's hockey game.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Women's hockey game.
+ - Youth hockey game.
+ - Amateur hockey game.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/judo.yaml b/vid2vid-zero-main/vid2vid_real_config/judo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30bada5d3c007e8e561d0d019de7b48171b62327
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/judo.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/judo/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/judo.mp4
+ prompt: Judo competition.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Karate competition.
+ - Taekwondo competition.
+ - Wrestling competition.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/kettleball-training.yaml b/vid2vid-zero-main/vid2vid_real_config/kettleball-training.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef901ee326a453f30b2ca67173d8cc11a5a146cf
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/kettleball-training.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/kettleball-training/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/kettleball-training.mp4
+ prompt: A man lifts a kettlebell continuously on the beach.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman lifts dumbbells continuously in a gym.
+ - A man lifts a barbell continuously in a weightlifting arena.
+ - A teenager lifts a medicine ball continuously in a park.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/kitesurfing-fall.yaml b/vid2vid-zero-main/vid2vid_real_config/kitesurfing-fall.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0b41f615b64783548dc28cb66d1a154adc7b4829
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/kitesurfing-fall.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/kitesurfing-fall/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/kitesurfing-fall.mp4
+ prompt: A man kitesurfing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman windsurfing.
+ - A child boogie-boarding.
+ - A group of friends surfing together.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/las-vegas-time-lapse.yaml b/vid2vid-zero-main/vid2vid_real_config/las-vegas-time-lapse.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a4a8aab25a50140d39235bed91f6a2f56b4252d
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/las-vegas-time-lapse.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/las-vegas-time-lapse/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/las-vegas-time-lapse.mp4
+ prompt: The streets of the city at night are filled with cars.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The alleys of the old town at night are filled with pedestrians.
+ - The avenues of the business district during rush hour are filled with buses.
+ - The boulevards of the city in the morning are filled with cyclists.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/lindy-hop.yaml b/vid2vid-zero-main/vid2vid_real_config/lindy-hop.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ca1c104ad754788bb939e86a3b35668327ed725
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/lindy-hop.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/lindy-hop/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/lindy-hop.mp4
+ prompt: People are doing ballroom dancing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - People are doing hip-hop dancing.
+ - People are doing salsa dancing.
+ - People are doing square dancing.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/loading.yaml b/vid2vid-zero-main/vid2vid_real_config/loading.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b173cc9ec020bab6112b28e84fbf465223f18950
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/loading.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/loading/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/loading.mp4
+ prompt: People are stacking goods.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - People are loading boxes onto a truck.
+ - People are arranging books on a shelf.
+ - People are piling up logs in a backyard.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/lotus.yaml b/vid2vid-zero-main/vid2vid_real_config/lotus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b759da3d5f0a20fe726872b93871f929c12878de
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/lotus.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/lotus/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/lotus.mp4
+ prompt: Lotus flowers on a rainy day.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Sunflowers on a sunny day.
+ - Roses in a gentle breeze.
+ - Daisies in a meadow on a spring day.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/mallard-duck-flight.yaml b/vid2vid-zero-main/vid2vid_real_config/mallard-duck-flight.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2074fd29c64fe04576f258f6c99998591ff8023
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/mallard-duck-flight.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/mallard-duck-flight/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/mallard-duck-flight.mp4
+ prompt: Mallard duck is flying.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Pigeon is flying over a city.
+ - Eagle is soaring in the sky.
+ - Sparrow is flying between trees.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/raindrops.yaml b/vid2vid-zero-main/vid2vid_real_config/raindrops.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2b2c03e445a12e5fc627c070b054cb0c46973488
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/raindrops.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/raindrops/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/raindrops.mp4
+ prompt: Rain falls on the ground.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Snow falls on the ground.
+ - Hailstones fall on the roof.
+ - Dew drops form on the grass.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/salsa.yaml b/vid2vid-zero-main/vid2vid_real_config/salsa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fe888e39ca0ca76e608272b9d332945790e17383
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/salsa.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/salsa/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/salsa.mp4
+ prompt: People are dancing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - People are singing.
+ - People are performing a play.
+ - People are doing a group yoga session.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/sharks-swimming.yaml b/vid2vid-zero-main/vid2vid_real_config/sharks-swimming.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce0907de5eb5e400499e47dba7bb51559e5e4eac
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/sharks-swimming.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/sharks-swimming/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/sharks-swimming.mp4
+ prompt: Sharks swimming in the sea.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Whales swimming in the ocean.
+ - Octopuses moving in the deep sea.
+ - Dolphins playing in the waves.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/shopping-entertainment-center.yaml b/vid2vid-zero-main/vid2vid_real_config/shopping-entertainment-center.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8961fb7e678e6aff390a080326f4e462ee7a72fc
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/shopping-entertainment-center.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/shopping-entertainment-center/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/shopping-entertainment-center.mp4
+ prompt: In the mall, people go up and down on the elevator.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - In the hotel, people go up and down on the escalator.
+ - In the office building, people use the stairs.
+ - In the department store, people ride the moving walkway.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/singapore-airbus-a380-landing.yaml b/vid2vid-zero-main/vid2vid_real_config/singapore-airbus-a380-landing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4aaa8a4ee5035ba74c4d6eca9e79871a0f22669d
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/singapore-airbus-a380-landing.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/singapore-airbus-a380-landing/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/singapore-airbus-a380-landing.mp4
+ prompt: The plane is in the process of landing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The helicopter is in the process of taking off.
+ - The hot air balloon is in the process of descending.
+ - The glider is in the process of landing on a grassy field.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/ski-follow.yaml b/vid2vid-zero-main/vid2vid_real_config/ski-follow.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc0c2f5163d91c2a6730b33d32ec294a5f9ca2bf
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/ski-follow.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/ski-follow/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/ski-follow.mp4
+ prompt: A man skiing, follower's perspective.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman snowboarding, leader's perspective.
+ - A child sledding, parent's perspective.
+ - A group of skiers skiing, bird's-eye perspective.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/ski-lift-time-lapse.yaml b/vid2vid-zero-main/vid2vid_real_config/ski-lift-time-lapse.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e7a5c975e135bbc1c134adeea4c7dec9d49ac230
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/ski-lift-time-lapse.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/ski-lift-time-lapse/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/ski-lift-time-lapse.mp4
+ prompt: The gondola is running.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The ski lift chair is moving.
+ - The cable car is ascending.
+ - The tram is running along the tracks.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/street-artist-painting.yaml b/vid2vid-zero-main/vid2vid_real_config/street-artist-painting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1d5612642b07040839bf3d7e97e0da9f1d6c28cf
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/street-artist-painting.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/street-artist-painting/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/street-artist-painting.mp4
+ prompt: Street artist is painting.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Street musician is playing an instrument.
+ - Street performer is doing a magic show.
+ - Street vendor is selling handicrafts.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/stunt.yaml b/vid2vid-zero-main/vid2vid_real_config/stunt.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fa8ca0122672da2b96e05640dc187e5a9d808a7b
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/stunt.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/stunt/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/stunt.mp4
+ prompt: A man doing stunts on a single scooter.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman doing stunts on a skateboard.
+ - A child doing tricks on a balance bike.
+ - A group of people doing stunts on segways.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/swans.yaml b/vid2vid-zero-main/vid2vid_real_config/swans.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f80d17703b39080d419eaa857ea545ea789e3f80
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/swans.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/swans/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/swans.mp4
+ prompt: A flock of swans swims in the river.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A group of ducks swims in a pond.
+ - A school of fish swims in a lake.
+ - A pod of seals swims in the ocean.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/swimmer.yaml b/vid2vid-zero-main/vid2vid_real_config/swimmer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8330d7625cef5d812bed1287ffe21af769c1174d
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/swimmer.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/swimmer/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/swimmer.mp4
+ prompt: A man jumps into the pool for a swim.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman dives into the pool for a lap.
+ - A child splashes into the pool to play.
+ - A diver jumps off a diving board into the pool.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/triple-check-knit.yaml b/vid2vid-zero-main/vid2vid_real_config/triple-check-knit.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..303589b9d80c1c492496a0766a5e025f9f037f07
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/triple-check-knit.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/triple-check-knit/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/triple-check-knit.mp4
+ prompt: A man weaves things with thread.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A woman knits a scarf with yarn.
+ - A child makes a friendship bracelet with string.
+ - A weaver creates a tapestry with colored threads.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/trucks-race.yaml b/vid2vid-zero-main/vid2vid_real_config/trucks-race.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc9dace3e66d95416583fa6128513c23929c4e96
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/trucks-race.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/trucks-race/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/trucks-race.mp4
+ prompt: Truck Racing.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - Car Racing.
+ - Motorcycle Racing.
+ - Boat Racing.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/varanus-cage.yaml b/vid2vid-zero-main/vid2vid_real_config/varanus-cage.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..97386dd60dc0db44e468a35cbd9e515ff5aba0cc
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/varanus-cage.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/varanus-cage/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/varanus-cage.mp4
+ prompt: A gecko climbing an ash tree.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A lizard climbing a palm tree.
+ - A spider crawling on a wall.
+ - A snail moving up a plant stem.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/vid2vid.py b/vid2vid-zero-main/vid2vid_real_config/vid2vid.py
new file mode 100644
index 0000000000000000000000000000000000000000..82132a8f0cb81106bb735cfb21c5018ebd9f331e
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/vid2vid.py
@@ -0,0 +1,46 @@
+import copy
+import jsonlines
+import yaml
+from pathlib import Path
+
+
+
+# Read the template config file
+with open(r"D:\video_editing_dataset\prompt\vid2vid_config\plane.yaml") as f:
+    template = yaml.safe_load(f)
+
+# Read the two JSONL files in lockstep
+with jsonlines.open(r"D:\video_editing_dataset\prompt\real\prompt.jsonl") as prompt_reader, \
+        jsonlines.open(r"D:\video_editing_dataset\prompt\real\edit_prompt.jsonl") as edit_prompt_reader:
+    for prompt_line, edit_line in zip(prompt_reader, edit_prompt_reader):
+        # Extract the values to substitute
+        video_name = Path(prompt_line["video"]).stem  # strip the .mp4 suffix
+        video_path = prompt_line["video"]
+
+        # Build the new config; deep copy so the template's nested dicts are not mutated
+        new_config = copy.deepcopy(template)
+
+        # Update output_dir
+        new_output_dir = f"/home/wangjuntong/vid2vid-zero-main/outputs/{video_name}/"
+        new_config["output_dir"] = new_output_dir
+
+        # Update video_path
+        new_video_path = f"/home/wangjuntong/video_editing_dataset/real_video/{video_path}"
+        new_config["input_data"]["video_path"] = new_video_path
+
+        # Update prompt
+        new_config["input_data"]["prompt"] = prompt_line["prompt"]
+
+        # Update validation prompts
+        new_config["validation_data"]["prompts"] = [
+            edit_line["edit_prompt1"],
+            edit_line["edit_prompt2"],
+            edit_line["edit_prompt3"]
+        ]
+
+        # Generate the file name and save
+        output_filename = f"{video_name}.yaml"
+        with open(output_filename, "w") as f:
+            yaml.dump(new_config, f, sort_keys=False, default_flow_style=False)
+
+print("Config file generation complete!")
\ No newline at end of file
diff --git a/vid2vid-zero-main/vid2vid_real_config/volleyball.yaml b/vid2vid-zero-main/vid2vid_real_config/volleyball.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b1248476b886dad7ea2007c85f8da928783f6a5
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/volleyball.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/volleyball/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/volleyball.mp4
+ prompt: A woman practicing volleyball.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A man practicing tennis.
+ - A child practicing badminton.
+ - A group practicing beach volleyball.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/weightlifting-sofa.yaml b/vid2vid-zero-main/vid2vid_real_config/weightlifting-sofa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..523300b7ccf865eaac05b8d0504cae35d91b5b46
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/weightlifting-sofa.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/weightlifting-sofa/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/weightlifting-sofa.mp4
+ prompt: A woman lifts weights continuously next to a couch.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - A man lifts weights continuously in a fitness studio.
+ - A teenager lifts weights continuously in a garage gym.
+ - A group lifts weights continuously in a park workout area.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0
diff --git a/vid2vid-zero-main/vid2vid_real_config/wind-turbines-at-dusk.yaml b/vid2vid-zero-main/vid2vid_real_config/wind-turbines-at-dusk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6960adf574321ee11b3bc82973c2d4987999c05
--- /dev/null
+++ b/vid2vid-zero-main/vid2vid_real_config/wind-turbines-at-dusk.yaml
@@ -0,0 +1,34 @@
+pretrained_model_path: checkpoints
+output_dir: /home/wangjuntong/vid2vid-zero-main/outputs/wind-turbines-at-dusk/
+input_data:
+ video_path: /home/wangjuntong/video_editing_dataset/real_video/wind-turbines-at-dusk.mp4
+ prompt: The wind turbine is spinning.
+ n_sample_frames: 24
+ width: 512
+ height: 512
+ sample_start_idx: 0
+ sample_frame_rate: 1
+validation_data:
+ prompts:
+ - The windmill is turning.
+ - The ceiling fan is rotating.
+ - The propeller of an airplane is spinning.
+ video_length: 24
+ width: 512
+ height: 512
+ num_inference_steps: 50
+ guidance_scale: 7.5
+ num_inv_steps: 50
+ use_null_inv: true
+ null_inner_steps: 1
+ null_base_lr: 1e-2
+ null_uncond_ratio: -0.5
+ null_normal_infer: true
+input_batch_size: 1
+seed: 33
+mixed_precision: 'no'
+gradient_checkpointing: true
+enable_xformers_memory_efficient_attention: true
+use_sc_attn: true
+use_st_attn: true
+st_attn_idx: 0