Initial upload of directory
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +8 -0
- .gitignore +10 -0
- .python-version +1 -0
- LICENSE +201 -0
- README.md +15 -0
- assets/demo1_audio.wav +3 -0
- assets/demo1_video.mp4 +3 -0
- assets/demo2_audio.wav +3 -0
- assets/demo2_video.mp4 +3 -0
- assets/demo3_audio.wav +3 -0
- assets/demo3_video.mp4 +3 -0
- assets/framework.png +3 -0
- checkpoints/.gitattributes +33 -0
- checkpoints/.gitignore +1 -0
- checkpoints/auxiliary/2DFAN4-cd938726ad.zip +3 -0
- checkpoints/auxiliary/i3d_torchscript.pt +3 -0
- checkpoints/auxiliary/koniq_pretrained.pkl +3 -0
- checkpoints/auxiliary/s3fd-619a316812.pth +3 -0
- checkpoints/auxiliary/sfd_face.pth +3 -0
- checkpoints/auxiliary/syncnet_v2.model +3 -0
- checkpoints/auxiliary/vgg16-397923af.pth +3 -0
- checkpoints/auxiliary/vit_g_hybrid_pt_1200e_ssv2_ft.pth +3 -0
- checkpoints/latentsync/README.md +14 -0
- checkpoints/latentsync/config.json +3 -0
- checkpoints/latentsync/latentsync_syncnet.pt +3 -0
- checkpoints/latentsync/latentsync_unet.pt +3 -0
- checkpoints/sd-vae-ft-mse/README.md +83 -0
- checkpoints/sd-vae-ft-mse/config.json +29 -0
- checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.bin +3 -0
- checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.safetensors +3 -0
- checkpoints/whisper/tiny.pt +3 -0
- configs/audio.yaml +23 -0
- configs/scheduler_config.json +13 -0
- configs/syncnet/syncnet_16_latent.yaml +46 -0
- configs/syncnet/syncnet_16_pixel.yaml +45 -0
- configs/syncnet/syncnet_25_pixel.yaml +45 -0
- configs/unet/first_stage.yaml +103 -0
- configs/unet/second_stage.yaml +103 -0
- debian_deps.sh +10 -0
- inference.sh +2 -0
- preprocess/affine_transform.py +137 -0
- preprocess/data_processing_pipeline.py +85 -0
- preprocess/detect_shot.py +62 -0
- preprocess/filter_high_resolution.py +112 -0
- preprocess/filter_visual_quality.py +127 -0
- preprocess/remove_broken_videos.py +43 -0
- preprocess/remove_incorrect_affined.py +81 -0
- preprocess/resample_fps_hz.py +70 -0
- preprocess/segment_videos.py +62 -0
- preprocess/sync_av.py +113 -0
.gitattributes
CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/demo1_audio.wav filter=lfs diff=lfs merge=lfs -text
+assets/demo1_video.mp4 filter=lfs diff=lfs merge=lfs -text
+assets/demo2_audio.wav filter=lfs diff=lfs merge=lfs -text
+assets/demo2_video.mp4 filter=lfs diff=lfs merge=lfs -text
+assets/demo3_audio.wav filter=lfs diff=lfs merge=lfs -text
+assets/demo3_video.mp4 filter=lfs diff=lfs merge=lfs -text
+assets/framework.png filter=lfs diff=lfs merge=lfs -text
+temp/video.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,10 @@
+# Python-generated files
+__pycache__/
+*.py[oc]
+build/
+dist/
+wheels/
+*.egg-info
+
+# Virtual environments
+.venv
.python-version
ADDED
@@ -0,0 +1 @@
+3.11
LICENSE
ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
README.md
ADDED
@@ -0,0 +1,15 @@
+# OpenLipSync
+
+This is a small repo containing all the required files to run inference for LatentSync 1.5.
+
+TODO:
+- add MuseTalk checkpoints
+- add LatentSync 1.6 checkpoints
+
+Installation
+- clone the repo
+- on Debian-based systems, run `bash debian_deps.sh`
+
+Run
+- for inference, edit scripts/inference.py to add your video and audio paths
+- run with `uv run python -m scripts.inference`
assets/demo1_audio.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f7dd2112dbdc0bece5ee6f26553a4867b65740eb53187ecc8b1a3c1618b2405
+size 307278
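All of the media assets in this upload are stored as Git LFS pointer files like the one above: a three-line text stub recording the spec version, a sha256 object ID, and the byte size. A minimal, illustrative parser for that format (not part of this repo):

```python
# Illustrative only: parse the three-line Git LFS pointer format shown above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:4f7dd2112dbdc0bece5ee6f26553a4867b65740eb53187ecc8b1a3c1618b2405
size 307278"""
print(parse_lfs_pointer(pointer)["size"])  # 307278
```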
assets/demo1_video.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed2dd1e2001aa605c3f2d77672a8af4ed55e427a85c55d408adfc3d5076bc872
+size 1240008
assets/demo2_audio.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4916574779fb975367ddcb1f12597205ae15ea8aeaa61ad92d2c1c5d719c3607
+size 634958
assets/demo2_video.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c3f10288e0642e587a95c0040e6966f8f6b7e003c3a17b572f72472b896d8ff
+size 1772492
assets/demo3_audio.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5014567b03d35e0bd813a3725c3129a99722497cd4cf8e036d2c304530ea432
+size 593998
assets/demo3_video.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa177b2a44f7809f606285c120e270d526caa50d708ec95e0f614d220970e0f
+size 2112370
assets/framework.png
ADDED
Git LFS file (pointer details not shown in this view)
checkpoints/.gitattributes
ADDED
@@ -0,0 +1,33 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
checkpoints/.gitignore
ADDED
@@ -0,0 +1 @@
+.DS_Store
checkpoints/auxiliary/2DFAN4-cd938726ad.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd938726adb1f15f361263cce2db9cb820c42585fa8796ec72ce19107f369a46
+size 96316515
checkpoints/auxiliary/i3d_torchscript.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bec6519f66ea534e953026b4ae2c65553c17bf105611c746d904657e5860a5e2
+size 51235320
checkpoints/auxiliary/koniq_pretrained.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff9277bcc68ecc10e77d88b6d0a32825ec3c85562095542734ec6212eaaf6d81
+size 109768650
checkpoints/auxiliary/s3fd-619a316812.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:619a31681264d3f7f7fc7a16a42cbbe8b23f31a256f75a366e5a1bcd59b33543
+size 89843225
checkpoints/auxiliary/sfd_face.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d54a87c2b7543b64729c9a25eafd188da15fd3f6e02f0ecec76ae1b30d86c491
+size 89844381
checkpoints/auxiliary/syncnet_v2.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:961e8696f888fce4f3f3a6c3d5b3267cf5b343100b238e79b2659bff2c605442
+size 54573114
checkpoints/auxiliary/vgg16-397923af.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:397923af8e79cdbb6a7127f12361acd7a2f83e06b05044ddf496e83de57a5bf0
+size 553433881
checkpoints/auxiliary/vit_g_hybrid_pt_1200e_ssv2_ft.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a210a92f035dff30c53b46157b612e7a1a5d3c99700e1b2d71da5c399ca7e70
+size 2023804201
checkpoints/latentsync/README.md
ADDED
@@ -0,0 +1,14 @@
+---
+license: openrail++
+library_name: diffusers
+tags:
+- video-to-video
+---
+
+# The checkpoints of LatentSync
+
+This repo stores not only the pretrained U-Net and SyncNet checkpoints of LatentSync, but also the Whisper checkpoint and the auxiliary checkpoints for face detection, SyncNet confidence scoring, and so on. Together they cover everything you need for both inference and training of LatentSync.
+
+Paper: https://arxiv.org/abs/2412.09262
+
+Code: https://github.com/bytedance/LatentSync
checkpoints/latentsync/config.json
ADDED
@@ -0,0 +1,3 @@
+{
+    "Name": "LatentSync"
+}
checkpoints/latentsync/latentsync_syncnet.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38fa63bad3ed2332f647c40a5dc616cb0e233db8579f698f62af4c41965c4da5
+size 1488019828
checkpoints/latentsync/latentsync_unet.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63197c73d21ad55ddf2b6e5cc38d0a19a1e494317aefe2707c6b6c6fc952f3c7
+size 3400080614
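Once the LFS objects are pulled, the two files above are ordinary PyTorch checkpoints (~1.5 GB and ~3.4 GB). A minimal sketch of inspecting one; the checkpoint's internal layout is an assumption here:

```python
# Illustrative only: peek at the top-level structure of the U-Net checkpoint.
# torch.load is standard; what keys the file actually contains is an assumption.
import torch

state = torch.load("checkpoints/latentsync/latentsync_unet.pt", map_location="cpu")
if isinstance(state, dict):
    print(list(state.keys())[:5])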
checkpoints/sd-vae-ft-mse/README.md
ADDED
@@ -0,0 +1,83 @@
+---
+license: mit
+tags:
+- stable-diffusion
+- stable-diffusion-diffusers
+inference: false
+---
+# Improved Autoencoders
+
+## Utilizing
+These weights are intended to be used with the [🧨 diffusers library](https://github.com/huggingface/diffusers). If you are looking for the model to use with the original [CompVis Stable Diffusion codebase](https://github.com/CompVis/stable-diffusion), [come here](https://huggingface.co/stabilityai/sd-vae-ft-mse-original).
+
+#### How to use with 🧨 diffusers
+You can integrate this fine-tuned VAE decoder into your existing `diffusers` workflows by including a `vae` argument to the `StableDiffusionPipeline`:
+```py
+from diffusers.models import AutoencoderKL
+from diffusers import StableDiffusionPipeline
+
+model = "CompVis/stable-diffusion-v1-4"
+vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
+pipe = StableDiffusionPipeline.from_pretrained(model, vae=vae)
+```
+
+## Decoder Finetuning
+We publish two kl-f8 autoencoder versions, finetuned from the original [kl-f8 autoencoder](https://github.com/CompVis/latent-diffusion#pretrained-autoencoding-models) on a 1:1 ratio of [LAION-Aesthetics](https://laion.ai/blog/laion-aesthetics/) and LAION-Humans, an unreleased subset containing only SFW images of humans. The intent was to fine-tune on the Stable Diffusion training set (the autoencoder was originally trained on OpenImages) but also enrich the dataset with images of humans to improve the reconstruction of faces.
+The first, _ft-EMA_, was resumed from the original checkpoint, trained for 313198 steps, and uses EMA weights. It uses the same loss configuration as the original checkpoint (L1 + LPIPS).
+The second, _ft-MSE_, was resumed from _ft-EMA_, also uses EMA weights, and was trained for another 280k steps using a different loss, with more emphasis
+on MSE reconstruction (MSE + 0.1 * LPIPS). It produces somewhat "smoother" outputs. The batch size for both versions was 192 (16 A100s, batch size 12 per GPU).
+To keep compatibility with existing models, only the decoder part was finetuned; the checkpoints can be used as a drop-in replacement for the existing autoencoder.
+
+_Original kl-f8 VAE vs f8-ft-EMA vs f8-ft-MSE_
+
+## Evaluation
+### COCO 2017 (256x256, val, 5000 images)
+| Model | train steps | rFID | PSNR | SSIM | PSIM | Link | Comments |
+|----------|---------|------|--------------|---------------|---------------|------|----------|
+| original | 246803 | 4.99 | 23.4 +/- 3.8 | 0.69 +/- 0.14 | 1.01 +/- 0.28 | https://ommer-lab.com/files/latent-diffusion/kl-f8.zip | as used in SD |
+| ft-EMA | 560001 | 4.42 | 23.8 +/- 3.9 | 0.69 +/- 0.13 | 0.96 +/- 0.27 | https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt | slightly better overall, with EMA |
+| ft-MSE | 840001 | 4.70 | 24.5 +/- 3.7 | 0.71 +/- 0.13 | 0.92 +/- 0.27 | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt | resumed with EMA from ft-EMA, emphasis on MSE (rec. loss = MSE + 0.1 * LPIPS), smoother outputs |
+
+
+### LAION-Aesthetics 5+ (256x256, subset, 10000 images)
+| Model | train steps | rFID | PSNR | SSIM | PSIM | Link | Comments |
+|----------|-----------|------|--------------|---------------|---------------|------|----------|
+| original | 246803 | 2.61 | 26.0 +/- 4.4 | 0.81 +/- 0.12 | 0.75 +/- 0.36 | https://ommer-lab.com/files/latent-diffusion/kl-f8.zip | as used in SD |
+| ft-EMA | 560001 | 1.77 | 26.7 +/- 4.8 | 0.82 +/- 0.12 | 0.67 +/- 0.34 | https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt | slightly better overall, with EMA |
+| ft-MSE | 840001 | 1.88 | 27.3 +/- 4.7 | 0.83 +/- 0.11 | 0.65 +/- 0.34 | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt | resumed with EMA from ft-EMA, emphasis on MSE (rec. loss = MSE + 0.1 * LPIPS), smoother outputs |
+
+
+### Visual
+_Visualization of reconstructions on 256x256 images from the COCO2017 validation dataset._
+
+<p align="center">
+<br>
+<b>256x256: ft-EMA (left), ft-MSE (middle), original (right)</b>
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00025_merged.png />
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00011_merged.png />
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00037_merged.png />
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00043_merged.png />
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00053_merged.png />
+</p>
+
+<p align="center">
+<img src=https://huggingface.co/stabilityai/stable-diffusion-decoder-finetune/resolve/main/eval/ae-decoder-tuning-reconstructions/merged/00029_merged.png />
+</p>
checkpoints/sd-vae-ft-mse/config.json
ADDED
@@ -0,0 +1,29 @@
+{
+    "_class_name": "AutoencoderKL",
+    "_diffusers_version": "0.4.2",
+    "act_fn": "silu",
+    "block_out_channels": [
+        128,
+        256,
+        512,
+        512
+    ],
+    "down_block_types": [
+        "DownEncoderBlock2D",
+        "DownEncoderBlock2D",
+        "DownEncoderBlock2D",
+        "DownEncoderBlock2D"
+    ],
+    "in_channels": 3,
+    "latent_channels": 4,
+    "layers_per_block": 2,
+    "norm_num_groups": 32,
+    "out_channels": 3,
+    "sample_size": 256,
+    "up_block_types": [
+        "UpDecoderBlock2D",
+        "UpDecoderBlock2D",
+        "UpDecoderBlock2D",
+        "UpDecoderBlock2D"
+    ]
+}
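This config, together with the weight files below, lets diffusers load the VAE straight from the local checkpoint directory. A minimal sketch:

```python
# Illustrative only: load the VAE from the local checkpoint directory
# (diffusers resolves config.json plus the weight file automatically).
from diffusers.models import AutoencoderKL

vae = AutoencoderKL.from_pretrained("checkpoints/sd-vae-ft-mse")
print(vae.config.latent_channels)  # 4, per the config above
```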
checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b4889b6b1d4ce7ae320a02dedaeff1780ad77d415ea0d744b476155c6377ddc
+size 334707217
checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815
+size 334643276
checkpoints/whisper/tiny.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9
+size 75572083
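This appears to be OpenAI's Whisper "tiny" checkpoint. LatentSync uses Whisper for audio feature extraction rather than transcription, so the standalone load below is illustrative only; `openai-whisper` accepts a local checkpoint path:

```python
# Illustrative only: load the local checkpoint with openai-whisper.
# How LatentSync itself consumes this file may differ.
import whisper

model = whisper.load_model("checkpoints/whisper/tiny.pt", device="cpu")
```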
configs/audio.yaml
ADDED
@@ -0,0 +1,23 @@
+audio:
+  num_mels: 80 # Number of mel-spectrogram channels and local conditioning dimensionality
+  rescale: true # Whether to rescale audio prior to preprocessing
+  rescaling_max: 0.9 # Rescaling value
+  use_lws:
+    false # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
+    # It's preferred to set this to true for use with https://github.com/r9y9/wavenet_vocoder
+    # Does not work if n_fft is not a multiple of hop_size!!
+  n_fft: 800 # Extra window size is filled with 0 paddings to match this parameter
+  hop_size: 200 # For 16000 Hz, 200 = 12.5 ms (0.0125 * sample_rate)
+  win_size: 800 # For 16000 Hz, 800 = 50 ms (if None, win_size = n_fft) (0.05 * sample_rate)
+  sample_rate: 16000 # 16000 Hz (corresponding to LibriSpeech) (sox --i <filename>)
+  frame_shift_ms: null
+  signal_normalization: true
+  allow_clipping_in_normalization: true
+  symmetric_mels: true
+  max_abs_value: 4.0
+  preemphasize: true # whether to apply the preemphasis filter
+  preemphasis: 0.97 # filter coefficient
+  min_level_db: -100
+  ref_level_db: 20
+  fmin: 55
+  fmax: 7600
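For orientation, here is how these parameters might map onto a standard librosa mel-spectrogram call; LatentSync's own audio module may differ in normalization, pre-emphasis, and dB scaling, so treat this as a sketch under those assumptions:

```python
# Illustrative only: the config's STFT/mel parameters in a librosa call.
import librosa

y, sr = librosa.load("assets/demo1_audio.wav", sr=16000)
mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=800, hop_length=200, win_length=800,
    n_mels=80, fmin=55, fmax=7600,
)
print(mel.shape)  # (80, num_frames)
```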
configs/scheduler_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+    "_class_name": "DDIMScheduler",
+    "_diffusers_version": "0.6.0.dev0",
+    "beta_end": 0.012,
+    "beta_schedule": "scaled_linear",
+    "beta_start": 0.00085,
+    "clip_sample": false,
+    "num_train_timesteps": 1000,
+    "set_alpha_to_one": false,
+    "steps_offset": 1,
+    "trained_betas": null,
+    "skip_prk_steps": true
+}
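This JSON can be handed directly to diffusers to build the scheduler; `from_config` accepts a plain dict, so a minimal sketch is:

```python
# Illustrative only: instantiate the DDIM scheduler from this JSON config.
import json
from diffusers import DDIMScheduler

with open("configs/scheduler_config.json") as f:
    scheduler = DDIMScheduler.from_config(json.load(f))
print(scheduler.config.num_train_timesteps)  # 1000
```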
configs/syncnet/syncnet_16_latent.yaml
ADDED
@@ -0,0 +1,46 @@
+model:
+  audio_encoder: # input (1, 80, 52)
+    in_channels: 1
+    block_out_channels: [32, 64, 128, 256, 512, 1024]
+    downsample_factors: [[2, 1], 2, 2, 2, 2, [2, 3]]
+    attn_blocks: [0, 0, 0, 0, 0, 0]
+    dropout: 0.0
+  visual_encoder: # input (64, 32, 32)
+    in_channels: 64
+    block_out_channels: [64, 128, 256, 256, 512, 1024]
+    downsample_factors: [2, 2, 2, 1, 2, 2]
+    attn_blocks: [0, 0, 0, 0, 0, 0]
+    dropout: 0.0
+
+ckpt:
+  resume_ckpt_path: ""
+  inference_ckpt_path: ""
+  save_ckpt_steps: 2500
+
+data:
+  train_output_dir: output/syncnet
+  num_val_samples: 1200
+  batch_size: 120 # 40
+  num_workers: 11 # 11
+  latent_space: true
+  num_frames: 16
+  resolution: 256
+  train_fileslist: ""
+  train_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/train
+  val_fileslist: ""
+  val_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/val
+  audio_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+  lower_half: false
+  pretrained_audio_model_path: facebook/wav2vec2-large-xlsr-53
+  audio_sample_rate: 16000
+  video_fps: 25
+
+optimizer:
+  lr: 1e-5
+  max_grad_norm: 1.0
+
+run:
+  max_train_steps: 10000000
+  validation_steps: 2500
+  mixed_precision_training: true
+  seed: 42
configs/syncnet/syncnet_16_pixel.yaml
ADDED
@@ -0,0 +1,45 @@
+model:
+  audio_encoder: # input (1, 80, 52)
+    in_channels: 1
+    block_out_channels: [32, 64, 128, 256, 512, 1024, 2048]
+    downsample_factors: [[2, 1], 2, 2, 1, 2, 2, [2, 3]]
+    attn_blocks: [0, 0, 0, 0, 0, 0, 0]
+    dropout: 0.0
+  visual_encoder: # input (48, 128, 256)
+    in_channels: 48
+    block_out_channels: [64, 128, 256, 256, 512, 1024, 2048, 2048]
+    downsample_factors: [[1, 2], 2, 2, 2, 2, 2, 2, 2]
+    attn_blocks: [0, 0, 0, 0, 0, 0, 0, 0]
+    dropout: 0.0
+
+ckpt:
+  resume_ckpt_path: ""
+  inference_ckpt_path: checkpoints/latentsync_syncnet.pt
+  save_ckpt_steps: 2500
+
+data:
+  train_output_dir: debug/syncnet
+  num_val_samples: 2048
+  batch_size: 128 # 128
+  num_workers: 11 # 11
+  latent_space: false
+  num_frames: 16
+  resolution: 256
+  train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+  train_data_dir: ""
+  val_fileslist: ""
+  val_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/val
+  audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+  lower_half: true
+  audio_sample_rate: 16000
+  video_fps: 25
+
+optimizer:
+  lr: 1e-5
+  max_grad_norm: 1.0
+
+run:
+  max_train_steps: 10000000
+  validation_steps: 2500
+  mixed_precision_training: true
+  seed: 42
configs/syncnet/syncnet_25_pixel.yaml
ADDED
@@ -0,0 +1,45 @@
+model:
+  audio_encoder: # input (1, 80, 80)
+    in_channels: 1
+    block_out_channels: [64, 128, 256, 256, 512, 1024]
+    downsample_factors: [2, 2, 2, 2, 2, 2]
+    dropout: 0.0
+  visual_encoder: # input (75, 128, 256)
+    in_channels: 75
+    block_out_channels: [128, 128, 256, 256, 512, 512, 1024, 1024]
+    downsample_factors: [[1, 2], 2, 2, 2, 2, 2, 2, 2]
+    dropout: 0.0
+
+ckpt:
+  resume_ckpt_path: ""
+  inference_ckpt_path: ""
+  save_ckpt_steps: 2500
+
+data:
+  train_output_dir: debug/syncnet
+  num_val_samples: 2048
+  batch_size: 64 # 64
+  num_workers: 11 # 11
+  latent_space: false
+  num_frames: 25
+  resolution: 256
+  train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/hdtf_vox_avatars_ads_affine.txt
+  # /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/hdtf_voxceleb_avatars_affine.txt
+  train_data_dir: ""
+  val_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/vox_affine_val.txt
+  # /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/voxceleb_val.txt
+  val_data_dir: ""
+  audio_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel
+  lower_half: true
+  pretrained_audio_model_path: facebook/wav2vec2-large-xlsr-53
+  audio_sample_rate: 16000
+  video_fps: 25
+
+optimizer:
+  lr: 1e-5
+  max_grad_norm: 1.0
+
+run:
+  max_train_steps: 10000000
+  mixed_precision_training: true
+  seed: 42
configs/unet/first_stage.yaml
ADDED
@@ -0,0 +1,103 @@
+data:
+  syncnet_config_path: configs/syncnet/syncnet_16_pixel.yaml
+  train_output_dir: debug/unet
+  train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+  train_data_dir: ""
+  audio_embeds_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/whisper_new
+  audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+
+  val_video_path: assets/demo1_video.mp4
+  val_audio_path: assets/demo1_audio.wav
+  batch_size: 8 # 8
+  num_workers: 11 # 11
+  num_frames: 16
+  resolution: 256
+  mask: fix_mask
+  audio_sample_rate: 16000
+  video_fps: 25
+
+ckpt:
+  resume_ckpt_path: checkpoints/latentsync_unet.pt
+  save_ckpt_steps: 5000
+
+run:
+  pixel_space_supervise: false
+  use_syncnet: false
+  sync_loss_weight: 0.05 # 1/283
+  perceptual_loss_weight: 0.1 # 0.1
+  recon_loss_weight: 1 # 1
+  guidance_scale: 1.0 # 1.5 or 1.0
+  trepa_loss_weight: 10
+  inference_steps: 20
+  seed: 1247
+  use_mixed_noise: true
+  mixed_noise_alpha: 1 # 1
+  mixed_precision_training: true
+  enable_gradient_checkpointing: false
+  enable_xformers_memory_efficient_attention: true
+  max_train_steps: 10000000
+  max_train_epochs: -1
+
+optimizer:
+  lr: 1e-5
+  scale_lr: false
+  max_grad_norm: 1.0
+  lr_scheduler: constant
+  lr_warmup_steps: 0
+
+model:
+  act_fn: silu
+  add_audio_layer: true
+  custom_audio_layer: false
+  audio_condition_method: cross_attn # Choose between [cross_attn, group_norm]
+  attention_head_dim: 8
+  block_out_channels: [320, 640, 1280, 1280]
+  center_input_sample: false
+  cross_attention_dim: 384
+  down_block_types:
+    [
+      "CrossAttnDownBlock3D",
+      "CrossAttnDownBlock3D",
+      "CrossAttnDownBlock3D",
+      "DownBlock3D",
+    ]
+  mid_block_type: UNetMidBlock3DCrossAttn
+  up_block_types:
+    [
+      "UpBlock3D",
+      "CrossAttnUpBlock3D",
+      "CrossAttnUpBlock3D",
+      "CrossAttnUpBlock3D",
+    ]
+  downsample_padding: 1
+  flip_sin_to_cos: true
+  freq_shift: 0
+  in_channels: 13 # 49
+  layers_per_block: 2
+  mid_block_scale_factor: 1
+  norm_eps: 1e-5
+  norm_num_groups: 32
+  out_channels: 4 # 16
+  sample_size: 64
+  resnet_time_scale_shift: default # Choose between [default, scale_shift]
+  unet_use_cross_frame_attention: false
+  unet_use_temporal_attention: false
+
+  # Actually we don't use the motion module in the final version of LatentSync.
+  # When we started the project, we used the AnimateDiff codebase and tried the motion module, but the results were poor.
+  # We decided to leave the code here for possible future use.
+  use_motion_module: false
+  motion_module_resolutions: [1, 2, 4, 8]
+  motion_module_mid_block: false
+  motion_module_decoder_only: false
+  motion_module_type: Vanilla
+  motion_module_kwargs:
+    num_attention_heads: 8
+    num_transformer_block: 1
+    attention_block_types:
+      - Temporal_Self
+      - Temporal_Self
+    temporal_position_encoding: true
+    temporal_position_encoding_max_len: 16
+    temporal_attention_dim_div: 1
+    zero_initialize: true
configs/unet/second_stage.yaml
ADDED
@@ -0,0 +1,103 @@
+data:
+  syncnet_config_path: configs/syncnet/syncnet_16_pixel.yaml
+  train_output_dir: debug/unet
+  train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+  train_data_dir: ""
+  audio_embeds_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/whisper_new
+  audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+
+  val_video_path: assets/demo1_video.mp4
+  val_audio_path: assets/demo1_audio.wav
+  batch_size: 2 # 8
+  num_workers: 11 # 11
+  num_frames: 16
+  resolution: 256
+  mask: fix_mask
+  audio_sample_rate: 16000
+  video_fps: 25
+
+ckpt:
+  resume_ckpt_path: checkpoints/latentsync_unet.pt
+  save_ckpt_steps: 5000
+
+run:
+  pixel_space_supervise: true
+  use_syncnet: true
+  sync_loss_weight: 0.05 # 1/283
+  perceptual_loss_weight: 0.1 # 0.1
+  recon_loss_weight: 1 # 1
+  guidance_scale: 1.0 # 1.5 or 1.0
+  trepa_loss_weight: 10
+  inference_steps: 20
+  seed: 1247
+  use_mixed_noise: true
+  mixed_noise_alpha: 1 # 1
+  mixed_precision_training: true
+  enable_gradient_checkpointing: false
+  enable_xformers_memory_efficient_attention: true
+  max_train_steps: 10000000
+  max_train_epochs: -1
+
+optimizer:
+  lr: 1e-5
+  scale_lr: false
+  max_grad_norm: 1.0
+  lr_scheduler: constant
+  lr_warmup_steps: 0
+
+model:
+  act_fn: silu
+  add_audio_layer: true
+  custom_audio_layer: false
+  audio_condition_method: cross_attn # Choose between [cross_attn, group_norm]
+  attention_head_dim: 8
+  block_out_channels: [320, 640, 1280, 1280]
+  center_input_sample: false
+  cross_attention_dim: 384
+  down_block_types:
+    [
+      "CrossAttnDownBlock3D",
+      "CrossAttnDownBlock3D",
+      "CrossAttnDownBlock3D",
+      "DownBlock3D",
+    ]
+  mid_block_type: UNetMidBlock3DCrossAttn
+  up_block_types:
+    [
+      "UpBlock3D",
+      "CrossAttnUpBlock3D",
+      "CrossAttnUpBlock3D",
+      "CrossAttnUpBlock3D",
+    ]
+  downsample_padding: 1
+  flip_sin_to_cos: true
+  freq_shift: 0
+  in_channels: 13 # 49
+  layers_per_block: 2
+  mid_block_scale_factor: 1
+  norm_eps: 1e-5
+  norm_num_groups: 32
+  out_channels: 4 # 16
+  sample_size: 64
+  resnet_time_scale_shift: default # Choose between [default, scale_shift]
+  unet_use_cross_frame_attention: false
+  unet_use_temporal_attention: false
+
+  # Actually we don't use the motion module in the final version of LatentSync.
+  # When we started the project, we used the AnimateDiff codebase and tried the motion module, but the results were poor.
+  # We decided to leave the code here for possible future use.
+  use_motion_module: false
+  motion_module_resolutions: [1, 2, 4, 8]
+  motion_module_mid_block: false
+  motion_module_decoder_only: false
+  motion_module_type: Vanilla
+  motion_module_kwargs:
+    num_attention_heads: 8
+    num_transformer_block: 1
+    attention_block_types:
+      - Temporal_Self
+      - Temporal_Self
+    temporal_position_encoding: true
+    temporal_position_encoding_max_len: 16
+    temporal_attention_dim_div: 1
+    zero_initialize: true
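Both UNet stage configs above are plain YAML. A minimal, illustrative way to load one is shown below; whether LatentSync's training scripts actually use OmegaConf for this is an assumption here:

```python
# Illustrative only: load a stage config as a structured object.
from omegaconf import OmegaConf

config = OmegaConf.load("configs/unet/second_stage.yaml")
print(config.run.guidance_scale, config.model.cross_attention_dim)  # 1.0 384
```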
debian_deps.sh
ADDED
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+sudo apt -y install libgl1
+sudo apt -y install ffmpeg
+sudo apt -y install curl
+curl -LsSf https://astral.sh/uv/install.sh | sh
+# OpenCV dependencies
+uv init --python=3.11
+uv add pip
+uv run pip install -r requirements.txt
inference.sh
ADDED
@@ -0,0 +1,2 @@
+#!/bin/bash
+uv run python -m scripts.inference
preprocess/affine_transform.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from latentsync.utils.util import read_video, write_video
|
| 16 |
+
from latentsync.utils.image_processor import ImageProcessor
|
| 17 |
+
import torch
|
| 18 |
+
from einops import rearrange
|
| 19 |
+
import os
|
| 20 |
+
import tqdm
|
| 21 |
+
import subprocess
|
| 22 |
+
from multiprocessing import Process
import shutil

paths = []


def gather_video_paths(input_dir, output_dir):
    for video in sorted(os.listdir(input_dir)):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):  # Skip videos that were already processed
                continue
            paths.append((video_input, video_output))
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_video_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


class FaceDetector:
    def __init__(self, resolution: int = 512, device: str = "cpu"):
        self.image_processor = ImageProcessor(resolution, "fix_mask", device)

    def affine_transform_video(self, video_path):
        video_frames = read_video(video_path, change_fps=False)
        results = []
        for frame in video_frames:
            frame, _, _ = self.image_processor.affine_transform(frame)
            results.append(frame)
        results = torch.stack(results)

        results = rearrange(results, "f c h w -> f h w c").numpy()
        return results

    def close(self):
        self.image_processor.close()


def combine_video_audio(video_frames, video_input_path, video_output_path, process_temp_dir):
    video_name = os.path.basename(video_input_path)[:-4]
    audio_temp = os.path.join(process_temp_dir, f"{video_name}_temp.wav")
    video_temp = os.path.join(process_temp_dir, f"{video_name}_temp.mp4")

    write_video(video_temp, video_frames, fps=25)

    # Extract the original audio track, then mux it back with the transformed frames
    command = f"ffmpeg -y -loglevel error -i {video_input_path} -q:a 0 -map a {audio_temp}"
    subprocess.run(command, shell=True)

    os.makedirs(os.path.dirname(video_output_path), exist_ok=True)
    command = f"ffmpeg -y -loglevel error -i {video_temp} -i {audio_temp} -c:v libx264 -c:a aac -map 0:v -map 1:a -q:v 0 -q:a 0 {video_output_path}"
    subprocess.run(command, shell=True)

    os.remove(audio_temp)
    os.remove(video_temp)


def func(paths, process_temp_dir, device_id, resolution):
    os.makedirs(process_temp_dir, exist_ok=True)
    face_detector = FaceDetector(resolution, f"cuda:{device_id}")

    for video_input, video_output in paths:
        if os.path.isfile(video_output):
            continue
        try:
            video_frames = face_detector.affine_transform_video(video_input)
        except Exception as e:  # Handle the exception of face not detected
            print(f"Exception: {e} - {video_input}")
            continue

        os.makedirs(os.path.dirname(video_output), exist_ok=True)
        combine_video_audio(video_frames, video_input, video_output, process_temp_dir)
        print(f"Saved: {video_output}")

    face_detector.close()


def split(a, n):
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))


def affine_transform_multi_gpus(input_dir, output_dir, temp_dir, resolution, num_workers):
    print(f"Recursively gathering video paths of {input_dir} ...")
    gather_video_paths(input_dir, output_dir)
    num_devices = torch.cuda.device_count()
    if num_devices == 0:
        raise RuntimeError("No GPUs found")

    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    os.makedirs(temp_dir, exist_ok=True)

    split_paths = list(split(paths, num_workers * num_devices))

    processes = []

    for i in range(num_devices):
        for j in range(num_workers):
            process_index = i * num_workers + j
            process = Process(
                target=func, args=(split_paths[process_index], os.path.join(temp_dir, f"process_{i}"), i, resolution)
            )
            process.start()
            processes.append(process)

    for process in processes:
        process.join()


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars/resampled/train"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars/affine_transformed/train"
    temp_dir = "temp"
    resolution = 256
    num_workers = 10  # How many processes per device

    affine_transform_multi_gpus(input_dir, output_dir, temp_dir, resolution, num_workers)
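For reference, the split() helper above divides the gathered path list into num_workers * num_devices contiguous chunks whose lengths differ by at most one, so every worker process receives a near-equal share. A standalone sketch of its behavior:

# Standalone demonstration of the split() helper: 10 paths across 3 workers.
def split(a, n):
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))

print(list(split(list(range(10)), 3)))  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]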
preprocess/data_processing_pipeline.py
ADDED
@@ -0,0 +1,85 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
from preprocess.affine_transform import affine_transform_multi_gpus
from preprocess.remove_broken_videos import remove_broken_videos_multiprocessing
from preprocess.detect_shot import detect_shot_multiprocessing
from preprocess.filter_high_resolution import filter_high_resolution_multiprocessing
from preprocess.resample_fps_hz import resample_fps_hz_multiprocessing
from preprocess.segment_videos import segment_videos_multiprocessing
from preprocess.sync_av import sync_av_multi_gpus
from preprocess.filter_visual_quality import filter_visual_quality_multi_gpus
from preprocess.remove_incorrect_affined import remove_incorrect_affined_multiprocessing


def data_processing_pipeline(
    total_num_workers, per_gpu_num_workers, resolution, sync_conf_threshold, temp_dir, input_dir
):
    print("Removing broken videos...")
    remove_broken_videos_multiprocessing(input_dir, total_num_workers)

    print("Resampling FPS and Hz...")
    resampled_dir = os.path.join(os.path.dirname(input_dir), "resampled")
    resample_fps_hz_multiprocessing(input_dir, resampled_dir, total_num_workers)

    print("Detecting shot...")
    shot_dir = os.path.join(os.path.dirname(input_dir), "shot")
    detect_shot_multiprocessing(resampled_dir, shot_dir, total_num_workers)

    print("Segmenting videos...")
    segmented_dir = os.path.join(os.path.dirname(input_dir), "segmented")
    segment_videos_multiprocessing(shot_dir, segmented_dir, total_num_workers)

    print("Filtering high resolution...")
    high_resolution_dir = os.path.join(os.path.dirname(input_dir), "high_resolution")
    filter_high_resolution_multiprocessing(segmented_dir, high_resolution_dir, resolution, total_num_workers)

    print("Affine transforming videos...")
    affine_transformed_dir = os.path.join(os.path.dirname(input_dir), "affine_transformed")
    affine_transform_multi_gpus(
        high_resolution_dir, affine_transformed_dir, temp_dir, resolution, per_gpu_num_workers // 2
    )

    print("Removing incorrect affined videos...")
    remove_incorrect_affined_multiprocessing(affine_transformed_dir, total_num_workers)

    print("Syncing audio and video...")
    av_synced_dir = os.path.join(os.path.dirname(input_dir), f"av_synced_{sync_conf_threshold}")
    sync_av_multi_gpus(affine_transformed_dir, av_synced_dir, temp_dir, per_gpu_num_workers, sync_conf_threshold)

    print("Filtering visual quality...")
    high_visual_quality_dir = os.path.join(os.path.dirname(input_dir), "high_visual_quality")
    filter_visual_quality_multi_gpus(av_synced_dir, high_visual_quality_dir, per_gpu_num_workers)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--total_num_workers", type=int, default=100)
    parser.add_argument("--per_gpu_num_workers", type=int, default=20)
    parser.add_argument("--resolution", type=int, default=256)
    parser.add_argument("--sync_conf_threshold", type=int, default=3)
    parser.add_argument("--temp_dir", type=str, default="temp")
    parser.add_argument("--input_dir", type=str, required=True)
    args = parser.parse_args()

    data_processing_pipeline(
        args.total_num_workers,
        args.per_gpu_num_workers,
        args.resolution,
        args.sync_conf_threshold,
        args.temp_dir,
        args.input_dir,
    )
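Note that each stage writes its output to a sibling of input_dir (resampled, shot, segmented, and so on), so the raw videos should sit in their own parent directory. A minimal sketch of invoking the pipeline directly from Python rather than the CLI entry point above; /path/to/dataset/raw is a hypothetical placeholder:

# Direct invocation with the same values as the CLI defaults.
from preprocess.data_processing_pipeline import data_processing_pipeline

data_processing_pipeline(
    total_num_workers=100,
    per_gpu_num_workers=20,
    resolution=256,
    sync_conf_threshold=3,
    temp_dir="temp",
    input_dir="/path/to/dataset/raw",  # hypothetical placeholder path
)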
preprocess/detect_shot.py
ADDED
@@ -0,0 +1,62 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import tqdm
from multiprocessing import Pool

paths = []


def gather_paths(input_dir, output_dir):
    for video in sorted(os.listdir(input_dir)):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):
                continue
            paths.append([video_input, output_dir])
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


def detect_shot(video_input, output_dir):
    os.makedirs(output_dir, exist_ok=True)
    video = os.path.basename(video_input)[:-4]
    command = f"scenedetect --quiet -i {video_input} detect-adaptive --threshold 2 split-video --filename '{video}_shot_$SCENE_NUMBER' --output {output_dir}"
    # command = f"scenedetect --quiet -i {video_input} detect-adaptive --threshold 2 split-video --high-quality --filename '{video}_shot_$SCENE_NUMBER' --output {output_dir}"
    subprocess.run(command, shell=True)


def multi_run_wrapper(args):
    return detect_shot(*args)


def detect_shot_multiprocessing(input_dir, output_dir, num_workers):
    print(f"Recursively gathering video paths of {input_dir} ...")
    gather_paths(input_dir, output_dir)

    print(f"Detecting shot of {input_dir} ...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/high-resolution"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/shot"
    num_workers = 50

    detect_shot_multiprocessing(input_dir, output_dir, num_workers)
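The script shells out to the scenedetect CLI. For orientation only, a rough Python-API equivalent of the same adaptive detection and splitting, assuming PySceneDetect >= 0.6 (the CLI command above is what actually runs in this repo):

# Hedged sketch: API names per PySceneDetect 0.6 quickstart; "input.mp4" is a placeholder.
from scenedetect import detect, AdaptiveDetector
from scenedetect.video_splitter import split_video_ffmpeg

scene_list = detect("input.mp4", AdaptiveDetector(adaptive_threshold=2))
split_video_ffmpeg("input.mp4", scene_list)  # writes one file per detected shot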
preprocess/filter_high_resolution.py
ADDED
@@ -0,0 +1,112 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mediapipe as mp
from latentsync.utils.util import read_video
import os
import tqdm
import shutil
from multiprocessing import Pool

paths = []


def gather_video_paths(input_dir, output_dir, resolution):
    for video in sorted(os.listdir(input_dir)):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):
                continue
            paths.append([video_input, video_output, resolution])
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_video_paths(os.path.join(input_dir, video), os.path.join(output_dir, video), resolution)


class FaceDetector:
    def __init__(self, resolution=256):
        self.face_detection = mp.solutions.face_detection.FaceDetection(
            model_selection=0, min_detection_confidence=0.5
        )
        self.resolution = resolution

    def detect_face(self, image):
        height, width = image.shape[:2]
        # Process the image and detect faces.
        results = self.face_detection.process(image)

        if not results.detections:  # Face not detected
            raise Exception("Face not detected")

        if len(results.detections) != 1:
            return False
        detection = results.detections[0]  # Only use the first face in the image

        bounding_box = detection.location_data.relative_bounding_box
        face_width = int(bounding_box.width * width)
        face_height = int(bounding_box.height * height)
        if face_width < self.resolution or face_height < self.resolution:
            return False
        return True

    def detect_video(self, video_path):
        video_frames = read_video(video_path, change_fps=False)
        if len(video_frames) == 0:
            return False
        for frame in video_frames:
            if not self.detect_face(frame):
                return False
        return True

    def close(self):
        self.face_detection.close()


def filter_video(video_input, video_out, resolution):
    if os.path.isfile(video_out):
        return
    face_detector = FaceDetector(resolution)
    try:
        save = face_detector.detect_video(video_input)
    except Exception as e:
        # print(f"Exception: {e} Input video: {video_input}")
        face_detector.close()
        return
    if save:
        os.makedirs(os.path.dirname(video_out), exist_ok=True)
        shutil.copy(video_input, video_out)
    face_detector.close()


def multi_run_wrapper(args):
    return filter_video(*args)


def filter_high_resolution_multiprocessing(input_dir, output_dir, resolution, num_workers):
    print(f"Recursively gathering video paths of {input_dir} ...")
    gather_video_paths(input_dir, output_dir, resolution)

    print(f"Filtering high resolution videos in {input_dir} ...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai/lichunyu/HDTF/original/train"
    output_dir = "/mnt/bn/maliva-gen-ai/lichunyu/HDTF/detected/train"
    resolution = 256
    num_workers = 50

    filter_high_resolution_multiprocessing(input_dir, output_dir, resolution, num_workers)
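MediaPipe reports bounding boxes in coordinates normalized to [0, 1], which is why detect_face multiplies by the frame size before comparing against the resolution threshold. A worked example of that conversion with illustrative numbers:

# A relative box of width 0.4 and height 0.5 on a 1280x720 frame:
frame_width, frame_height = 1280, 720
face_width = int(0.4 * frame_width)    # 512 pixels
face_height = int(0.5 * frame_height)  # 360 pixels
keep = face_width >= 256 and face_height >= 256  # True: face spans >= 256 px in both dimensions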
preprocess/filter_visual_quality.py
ADDED
@@ -0,0 +1,127 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tqdm
import torch
import torchvision
import shutil
from multiprocessing import Process
import numpy as np
from decord import VideoReader
from einops import rearrange
from eval.hyper_iqa import HyperNet, TargetNet


paths = []


def gather_paths(input_dir, output_dir):
    # os.makedirs(output_dir, exist_ok=True)

    for video in tqdm.tqdm(sorted(os.listdir(input_dir))):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):
                continue
            paths.append((video_input, video_output))
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


def read_video(video_path: str):
    # Sample only the first, middle and last frames as a cheap proxy for the clip's quality
    vr = VideoReader(video_path)
    first_frame = vr[0].asnumpy()
    middle_frame = vr[len(vr) // 2].asnumpy()
    last_frame = vr[-1].asnumpy()
    vr.seek(0)
    video_frames = np.stack([first_frame, middle_frame, last_frame], axis=0)
    video_frames = torch.from_numpy(rearrange(video_frames, "b h w c -> b c h w"))
    video_frames = video_frames / 255.0
    return video_frames


def func(paths, device_id):
    device = f"cuda:{device_id}"

    model_hyper = HyperNet(16, 112, 224, 112, 56, 28, 14, 7).to(device)
    model_hyper.train(False)

    # load the pre-trained model on the koniq-10k dataset
    model_hyper.load_state_dict(torch.load("checkpoints/auxiliary/koniq_pretrained.pkl"))

    transforms = torchvision.transforms.Compose(
        [
            torchvision.transforms.CenterCrop(size=224),
            torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ]
    )

    for video_input, video_output in paths:
        try:
            video_frames = read_video(video_input)
            video_frames = transforms(video_frames)
            video_frames = video_frames.clone().detach().to(device)
            paras = model_hyper(video_frames)  # 'paras' contains the network weights conveyed to target network

            # Building target network; keep it on the same device as its inputs
            # (the original used .cuda(), which targets cuda:0 regardless of device_id)
            model_target = TargetNet(paras).to(device)
            for param in model_target.parameters():
                param.requires_grad = False

            # Quality prediction
            pred = model_target(paras["target_in_vec"])  # 'paras['target_in_vec']' is the input to target net

            # quality score ranges from 0-100, a higher score indicates a better quality
            quality_score = pred.mean().item()
            print(f"Input video: {video_input}\nVisual quality score: {quality_score:.2f}")

            if quality_score >= 40:
                os.makedirs(os.path.dirname(video_output), exist_ok=True)
                shutil.copy(video_input, video_output)
        except Exception as e:
            print(e)


def split(a, n):
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))


def filter_visual_quality_multi_gpus(input_dir, output_dir, num_workers):
    gather_paths(input_dir, output_dir)
    num_devices = torch.cuda.device_count()
    if num_devices == 0:
        raise RuntimeError("No GPUs found")
    split_paths = list(split(paths, num_workers * num_devices))
    processes = []

    for i in range(num_devices):
        for j in range(num_workers):
            process_index = i * num_workers + j
            process = Process(target=func, args=(split_paths[process_index], i))
            process.start()
            processes.append(process)

    for process in processes:
        process.join()


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/av_synced_high"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality"
    num_workers = 20  # How many processes per device

    filter_visual_quality_multi_gpus(input_dir, output_dir, num_workers)
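The hyperIQA pattern above is two-stage: HyperNet consumes the image and emits the weights of a per-image TargetNet, which then maps paras["target_in_vec"] to a 0-100 quality score. A hypothetical single-frame variant, assuming model_hyper and transforms are built exactly as in func and that frame is a uint8 HxWx3 RGB array:

import torch
from einops import rearrange
from eval.hyper_iqa import TargetNet

def score_frame(frame, model_hyper, transforms, device):
    x = torch.from_numpy(rearrange(frame, "h w c -> c h w")).float() / 255.0
    x = transforms(x).unsqueeze(0).to(device)  # 1 x 3 x 224 x 224
    paras = model_hyper(x)  # hyper network emits the target network's weights
    model_target = TargetNet(paras).to(device)
    for param in model_target.parameters():
        param.requires_grad = False
    return model_target(paras["target_in_vec"]).mean().item()  # 0-100, higher is better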
preprocess/remove_broken_videos.py
ADDED
@@ -0,0 +1,43 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from multiprocessing import Pool
import tqdm

from latentsync.utils.av_reader import AVReader
from latentsync.utils.util import gather_video_paths_recursively


def remove_broken_video(video_path):
    try:
        AVReader(video_path)  # Attempt to open the audio and video streams
    except Exception:
        os.remove(video_path)  # Unreadable file: delete it


def remove_broken_videos_multiprocessing(input_dir, num_workers):
    video_paths = gather_video_paths_recursively(input_dir)

    print("Removing broken videos...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(remove_broken_video, video_paths), total=len(video_paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/multilingual/affine_transformed"
    num_workers = 50

    remove_broken_videos_multiprocessing(input_dir, num_workers)
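The Pool + imap_unordered + tqdm idiom used here (and in the other CPU-bound scripts in this directory) streams results back as workers finish, so the progress bar stays accurate even when individual videos take very different amounts of time. The same idiom in isolation, with a stand-in task:

import tqdm
from multiprocessing import Pool

def work(item):
    return item * item  # stand-in for a per-video task

if __name__ == "__main__":
    items = list(range(1000))
    with Pool(8) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(work, items), total=len(items)):
            pass  # results are discarded; these scripts only care about side effects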
preprocess/remove_incorrect_affined.py
ADDED
@@ -0,0 +1,81 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mediapipe as mp
from latentsync.utils.util import read_video, gather_video_paths_recursively
import os
import tqdm
from multiprocessing import Pool


class FaceDetector:
    def __init__(self):
        self.face_detection = mp.solutions.face_detection.FaceDetection(
            model_selection=0, min_detection_confidence=0.5
        )

    def detect_face(self, image):
        # Process the image and detect faces.
        results = self.face_detection.process(image)

        if not results.detections:  # Face not detected
            return False

        if len(results.detections) != 1:
            return False
        return True

    def detect_video(self, video_path):
        try:
            video_frames = read_video(video_path, change_fps=False)
        except Exception as e:
            print(f"Exception: {e} - {video_path}")
            return False
        if len(video_frames) == 0:
            return False
        for frame in video_frames:
            if not self.detect_face(frame):
                return False
        return True

    def close(self):
        self.face_detection.close()


def remove_incorrect_affined(video_path):
    if not os.path.isfile(video_path):
        return
    face_detector = FaceDetector()
    has_face = face_detector.detect_video(video_path)
    if not has_face:
        os.remove(video_path)
        print(f"Removed: {video_path}")
    face_detector.close()


def remove_incorrect_affined_multiprocessing(input_dir, num_workers):
    video_paths = gather_video_paths_recursively(input_dir)
    print(f"Total videos: {len(video_paths)}")

    print(f"Removing incorrect affined videos in {input_dir} ...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(remove_incorrect_affined, video_paths), total=len(video_paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/multilingual_dcc/high_visual_quality"
    num_workers = 50

    remove_incorrect_affined_multiprocessing(input_dir, num_workers)
preprocess/resample_fps_hz.py
ADDED
@@ -0,0 +1,70 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import tqdm
from multiprocessing import Pool
import cv2

paths = []


def gather_paths(input_dir, output_dir):
    for video in sorted(os.listdir(input_dir)):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):
                continue
            paths.append([video_input, video_output])
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


def get_video_fps(video_path: str):
    cam = cv2.VideoCapture(video_path)
    fps = cam.get(cv2.CAP_PROP_FPS)
    cam.release()  # Release the capture handle; only the FPS metadata is needed
    return fps


def resample_fps_hz(video_input, video_output):
    os.makedirs(os.path.dirname(video_output), exist_ok=True)
    if get_video_fps(video_input) == 25:
        # Already 25 fps: copy the video stream and only resample the audio to 16 kHz
        command = f"ffmpeg -loglevel error -y -i {video_input} -c:v copy -ar 16000 -q:a 0 {video_output}"
    else:
        command = f"ffmpeg -loglevel error -y -i {video_input} -r 25 -ar 16000 -q:a 0 {video_output}"
    subprocess.run(command, shell=True)


def multi_run_wrapper(args):
    return resample_fps_hz(*args)


def resample_fps_hz_multiprocessing(input_dir, output_dir, num_workers):
    print(f"Recursively gathering video paths of {input_dir} ...")
    gather_paths(input_dir, output_dir)

    print(f"Resampling FPS and Hz of {input_dir} ...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/segmented/train"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/resampled_test"
    num_workers = 20

    resample_fps_hz_multiprocessing(input_dir, output_dir, num_workers)
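To spot-check the result, ffprobe (installed alongside the ffmpeg these scripts already depend on) can confirm that a resampled clip landed at 25 fps video and 16 kHz audio. A small sketch:

import subprocess

def probe_rates(video_path):
    def probe(stream, key):
        command = (
            f"ffprobe -v error -select_streams {stream} -show_entries stream={key} "
            f"-of default=noprint_wrappers=1:nokey=1 {video_path}"
        )
        return subprocess.run(command, shell=True, capture_output=True, text=True).stdout.strip()

    # Returns e.g. ("25/1", "16000") for a correctly resampled clip
    return probe("v:0", "r_frame_rate"), probe("a:0", "sample_rate")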
preprocess/segment_videos.py
ADDED
@@ -0,0 +1,62 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import tqdm
from multiprocessing import Pool

paths = []


def gather_paths(input_dir, output_dir):
    for video in sorted(os.listdir(input_dir)):
        if video.endswith(".mp4"):
            video_basename = video[:-4]
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, f"{video_basename}_%03d.mp4")
            # Skip videos whose first segment already exists (the raw %03d template
            # itself is never a file on disk, so check the rendered name)
            if os.path.isfile(video_output % 0):
                continue
            paths.append([video_input, video_output])
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


def segment_video(video_input, video_output):
    os.makedirs(os.path.dirname(video_output), exist_ok=True)
    command = f"ffmpeg -loglevel error -y -i {video_input} -map 0 -c:v copy -segment_time 5 -f segment -reset_timestamps 1 -q:a 0 {video_output}"
    # command = f'ffmpeg -loglevel error -y -i {video_input} -map 0 -segment_time 5 -f segment -reset_timestamps 1 -force_key_frames "expr:gte(t,n_forced*5)" -crf 18 -q:a 0 {video_output}'
    subprocess.run(command, shell=True)


def multi_run_wrapper(args):
    return segment_video(*args)


def segment_videos_multiprocessing(input_dir, output_dir, num_workers):
    print(f"Recursively gathering video paths of {input_dir} ...")
    gather_paths(input_dir, output_dir)

    print(f"Segmenting videos of {input_dir} ...")
    with Pool(num_workers) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
            pass


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars_new/cut"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars_new/segmented"
    num_workers = 50

    segment_videos_multiprocessing(input_dir, output_dir, num_workers)
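Because the video stream is stream-copied (-c:v copy), ffmpeg can only cut at existing keyframes, so segments come out approximately (not exactly) 5 seconds long; the commented-out command re-encodes and forces keyframes for exact cuts at the cost of speed. A back-of-envelope check on the expected file count for an illustrative 62-second clip:

import math

duration_s, segment_time_s = 62, 5
expected_segments = math.ceil(duration_s / segment_time_s)
print(expected_segments)  # 13, give or take one because cuts snap to keyframes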
preprocess/sync_av.py
ADDED
@@ -0,0 +1,113 @@
# Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tqdm
from eval.syncnet import SyncNetEval
from eval.syncnet_detect import SyncNetDetector
from eval.eval_sync_conf import syncnet_eval
import torch
import subprocess
import shutil
from multiprocessing import Process

paths = []


def gather_paths(input_dir, output_dir):
    # os.makedirs(output_dir, exist_ok=True)

    for video in tqdm.tqdm(sorted(os.listdir(input_dir))):
        if video.endswith(".mp4"):
            video_input = os.path.join(input_dir, video)
            video_output = os.path.join(output_dir, video)
            if os.path.isfile(video_output):
                continue
            paths.append((video_input, video_output))
        elif os.path.isdir(os.path.join(input_dir, video)):
            gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))


def adjust_offset(video_input: str, video_output: str, av_offset: int, fps: int = 25):
    command = f"ffmpeg -loglevel error -y -i {video_input} -itsoffset {av_offset/fps} -i {video_input} -map 0:v -map 1:a -c copy -q:v 0 -q:a 0 {video_output}"
    subprocess.run(command, shell=True)


def func(sync_conf_threshold, paths, device_id, process_temp_dir):
    os.makedirs(process_temp_dir, exist_ok=True)
    device = f"cuda:{device_id}"

    syncnet = SyncNetEval(device=device)
    syncnet.loadParameters("checkpoints/auxiliary/syncnet_v2.model")

    detect_results_dir = os.path.join(process_temp_dir, "detect_results")
    syncnet_eval_results_dir = os.path.join(process_temp_dir, "syncnet_eval_results")

    syncnet_detector = SyncNetDetector(device=device, detect_results_dir=detect_results_dir)

    for video_input, video_output in paths:
        try:
            av_offset, conf = syncnet_eval(
                syncnet, syncnet_detector, video_input, syncnet_eval_results_dir, detect_results_dir
            )
            if conf >= sync_conf_threshold and abs(av_offset) <= 6:
                os.makedirs(os.path.dirname(video_output), exist_ok=True)
                if av_offset == 0:
                    shutil.copy(video_input, video_output)
                else:
                    adjust_offset(video_input, video_output, av_offset)
        except Exception as e:
            print(e)


def split(a, n):
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))


def sync_av_multi_gpus(input_dir, output_dir, temp_dir, num_workers, sync_conf_threshold):
    gather_paths(input_dir, output_dir)
    num_devices = torch.cuda.device_count()
    if num_devices == 0:
        raise RuntimeError("No GPUs found")
    split_paths = list(split(paths, num_workers * num_devices))
    processes = []

    for i in range(num_devices):
        for j in range(num_workers):
            process_index = i * num_workers + j
            process = Process(
                target=func,
                args=(
                    sync_conf_threshold,
                    split_paths[process_index],
                    i,
                    os.path.join(temp_dir, f"process_{process_index}"),
                ),
            )
            process.start()
            processes.append(process)

    for process in processes:
        process.join()


if __name__ == "__main__":
    input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/affine_transformed"
    output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/temp"
    temp_dir = "temp"
    num_workers = 20  # How many processes per device
    sync_conf_threshold = 3

    sync_av_multi_gpus(input_dir, output_dir, temp_dir, num_workers, sync_conf_threshold)
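adjust_offset converts SyncNet's offset, reported in frames at 25 fps, into the seconds that ffmpeg's -itsoffset expects, then remuxes the shifted audio against the original video without re-encoding. The unit conversion in isolation, with an illustrative offset of 3 frames (the sign convention follows SyncNet's output):

fps = 25
av_offset = 3  # offset in frames, as reported by SyncNet
shift_seconds = av_offset / fps  # 0.12 s passed to -itsoffset
print(f"-itsoffset {shift_seconds}")  # -itsoffset 0.12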