Add files using upload-large-folder tool
- .gitattributes +15 -0
- Animation/checkpoint-40000/face_encoder-40000.pth +3 -0
- Animation/checkpoint-40000/model_1.safetensors +3 -0
- Animation/checkpoint-40000/music_encoder-40000.pth +3 -0
- Animation/checkpoint-40000/random_states_0.pkl +3 -0
- Animation/checkpoint-40000/scaler.pt +3 -0
- Animation/checkpoint-40000/scheduler.bin +3 -0
- Animation/face_encoder.pth +3 -0
- Animation/glintr100_torch.pth +3 -0
- Animation/pose_net.pth +3 -0
- DWPose/dw-ll_ucoco_384.onnx +3 -0
- DWPose/yolox_l.onnx +3 -0
- README.md +213 -0
- assets/figures/case-17.gif +3 -0
- assets/figures/case-18.gif +3 -0
- assets/figures/case-24.gif +3 -0
- assets/figures/case-35.gif +3 -0
- assets/figures/case-42.gif +3 -0
- assets/figures/case-45.gif +3 -0
- assets/figures/case-46.gif +3 -0
- assets/figures/case-47.gif +3 -0
- assets/figures/case-5.gif +3 -0
- assets/figures/case-61.gif +3 -0
- assets/figures/framework.jpg +3 -0
- assets/gif/case-35.gif +3 -0
- assets/gif/case-42.gif +3 -0
- config.json +61 -0
- inference.zip +3 -0
- models/antelopev2/.gitattributes +35 -0
- models/antelopev2/1k3d68.onnx +3 -0
- models/antelopev2/2d106det.onnx +3 -0
- models/antelopev2/genderage.onnx +3 -0
- models/antelopev2/glintr100.onnx +3 -0
- models/antelopev2/scrfd_10g_bnkps.onnx +3 -0
- stable-video-diffusion-img2vid-xt/.gitattributes +36 -0
- stable-video-diffusion-img2vid-xt/LICENSE.md +58 -0
- stable-video-diffusion-img2vid-xt/README.md +99 -0
- stable-video-diffusion-img2vid-xt/comparison.png +3 -0
- stable-video-diffusion-img2vid-xt/feature_extractor/preprocessor_config.json +28 -0
- stable-video-diffusion-img2vid-xt/image_encoder/config.json +23 -0
- stable-video-diffusion-img2vid-xt/image_encoder/model.fp16.safetensors +3 -0
- stable-video-diffusion-img2vid-xt/image_encoder/model.safetensors +3 -0
- stable-video-diffusion-img2vid-xt/model_index.json +25 -0
- stable-video-diffusion-img2vid-xt/output_tile.gif +3 -0
- stable-video-diffusion-img2vid-xt/scheduler/scheduler_config.json +20 -0
- stable-video-diffusion-img2vid-xt/unet/config.json +38 -0
- stable-video-diffusion-img2vid-xt/unet/diffusion_pytorch_model.fp16.safetensors +3 -0
- stable-video-diffusion-img2vid-xt/vae/config.json +24 -0
- stable-video-diffusion-img2vid-xt/vae/diffusion_pytorch_model.fp16.safetensors +3 -0
- stable-video-diffusion-img2vid-xt/vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-17.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-18.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-24.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-35.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-42.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-45.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-46.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-47.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-5.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/case-61.gif filter=lfs diff=lfs merge=lfs -text
+assets/figures/framework.jpg filter=lfs diff=lfs merge=lfs -text
+assets/gif/case-35.gif filter=lfs diff=lfs merge=lfs -text
+assets/gif/case-42.gif filter=lfs diff=lfs merge=lfs -text
+stable-video-diffusion-img2vid-xt/output_tile.gif filter=lfs diff=lfs merge=lfs -text
+stable-video-diffusion-img2vid-xt/comparison.png filter=lfs diff=lfs merge=lfs -text

Animation/checkpoint-40000/face_encoder-40000.pth
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:35b76d92e871b1748138e57981f359b3370243c3a8ab78efe6eae790a81ea19a
size 228754498

Animation/checkpoint-40000/model_1.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:b3300ed3471982175e860bb6b160cb21526bec77ea8374e756ce454378ac1f99
size 78662488

Animation/checkpoint-40000/music_encoder-40000.pth
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:175f95880adc4b29147cf795011ee3e99c18950fac87ae4a376408780c565c88
size 78664368

Animation/checkpoint-40000/random_states_0.pkl
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:00e8a66cb12362566477a023f139c92292de53fbb573c05a2671b498e7c8f99e
size 14668

Animation/checkpoint-40000/scaler.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1172aaa109f14e53bc5ce155928a5ea942fbe5c293a5d81f6fe8ee20111e777f
size 988

Animation/checkpoint-40000/scheduler.bin
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:2f895af1b9a8481335f64297e8e31bff88db8cfe9bb0d026282ddc4cf0ef3dc2
size 20008

Animation/face_encoder.pth
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:d6c1a3ac2ca22bbb1e6e23d63d6f9f03080d95fbe63fe014fb03244fcb5b003c
size 228754558

Animation/glintr100_torch.pth
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5f631718e783448b41631e15073bdc622eaeef56509bbad4e5085f23bd32db83
size 261223796

Animation/pose_net.pth
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:b56297b4a1cccf9fc2995a232ed06c4ad79a4e68ac00d716f6ba2bd8eb2401b2
size 829269

DWPose/dw-ll_ucoco_384.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:724f4ff2439ed61afb86fb8a1951ec39c6220682803b4a8bd4f598cd913b1843
size 134399116

DWPose/yolox_l.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:7860ae79de6c89a3c1eb72ae9a2756c0ccfbe04b7791bb5880afabd97855a411
size 216746733

README.md
ADDED
---
license: apache-2.0
pipeline_tag: image-to-video
tags:
- human-animation
---
# StableAnimator

<a href='https://francis-rings.github.io/StableAnimator'><img src='https://img.shields.io/badge/Project-Page-Green'></a> <a href='https://arxiv.org/abs/2411.17697'><img src='https://img.shields.io/badge/Paper-Arxiv-red'></a> <a href='https://huggingface.co/FrancisRing/StableAnimator/tree/main'><img src='https://img.shields.io/badge/HuggingFace-Model-orange'></a> <a href='https://www.youtube.com/watch?v=7fwFyFDzQgg'><img src='https://img.shields.io/badge/YouTube-Watch-red?style=flat-square&logo=youtube'></a> <a href='https://www.bilibili.com/video/BV1X5zyYUEuD'><img src='https://img.shields.io/badge/Bilibili-Watch-blue?style=flat-square&logo=bilibili'></a>

StableAnimator: High-Quality Identity-Preserving Human Image Animation
<br/>
*Shuyuan Tu<sup>1</sup>, Zhen Xing<sup>1</sup>, Xintong Han<sup>3</sup>, Zhi-Qi Cheng<sup>4</sup>, Qi Dai<sup>2</sup>, Chong Luo<sup>2</sup>, Zuxuan Wu<sup>1</sup>*
<br/>
[<sup>1</sup>Fudan University; <sup>2</sup>Microsoft Research Asia; <sup>3</sup>Huya Inc; <sup>4</sup>Carnegie Mellon University]

<p align="center">
<img src="assets/figures/case-47.gif" width="256" />
<img src="assets/figures/case-61.gif" width="256" />
<img src="assets/figures/case-45.gif" width="256" />
<img src="assets/figures/case-46.gif" width="256" />
<img src="assets/figures/case-5.gif" width="256" />
<img src="assets/figures/case-17.gif" width="256" />
<br/>
<span>Pose-driven human image animations generated by StableAnimator, showing its power to synthesize <b>high-fidelity</b> and <b>ID-preserving videos</b>. All animations are <b>directly synthesized by StableAnimator without the use of any face-related post-processing tools</b>, such as the face-swapping tool FaceFusion or face restoration models like GFP-GAN and CodeFormer.</span>
</p>

<p align="center">
<img src="assets/figures/case-35.gif" width="384" />
<img src="assets/figures/case-42.gif" width="384" />
<img src="assets/figures/case-18.gif" width="384" />
<img src="assets/figures/case-24.gif" width="384" />
<br/>
<span>Comparison results between StableAnimator and state-of-the-art (SOTA) human image animation models highlight the superior performance of StableAnimator in delivering <b>high-fidelity, identity-preserving human image animation</b>.</span>
</p>

## Overview

<p align="center">
<img src="assets/figures/framework.jpg" alt="model architecture" width="1280"/>
</br>
<i>The overview of the framework of StableAnimator.</i>
</p>

Current diffusion models for human image animation struggle to ensure identity (ID) consistency. This paper presents StableAnimator, <b>the first end-to-end ID-preserving video diffusion framework, which synthesizes high-quality videos without any post-processing, conditioned on a reference image and a sequence of poses.</b> Building upon a video diffusion model, StableAnimator contains carefully designed modules for both training and inference, striving for identity consistency. In particular, StableAnimator begins by computing image and face embeddings with off-the-shelf extractors, respectively; the face embeddings are further refined by interacting with the image embeddings through a global content-aware Face Encoder. StableAnimator then introduces a novel distribution-aware ID Adapter that prevents interference caused by temporal layers while preserving ID via alignment. During inference, we propose a novel Hamilton-Jacobi-Bellman (HJB) equation-based optimization to further enhance face quality. We demonstrate that solving the HJB equation can be integrated into the diffusion denoising process, and the resulting solution constrains the denoising path and thus benefits ID preservation. Experiments on multiple benchmarks show the effectiveness of StableAnimator both qualitatively and quantitatively.

## News
* `[2024-12-10]`:🔥 The Gradio interface is released! Many thanks to [@gluttony-10](https://space.bilibili.com/893892) for his contribution! Other code will be released very soon. Stay tuned!
* `[2024-12-6]`:🔥 All data preprocessing code (human skeleton extraction and human face mask extraction) is released! The training code and a detailed training tutorial will be released before 2024.12.13. Stay tuned!
* `[2024-12-4]`:🔥 We are thrilled to release an interesting dance demo (🔥🔥APT Dance🔥🔥)! The generated video can be seen on [YouTube](https://www.youtube.com/watch?v=KNPoAsWr_sk) and [Bilibili](https://www.bilibili.com/video/BV1KczXYhER7).
* `[2024-11-28]`:🔥 The data pre-processing code (human skeleton extraction) is available! Other code will be released very soon. Stay tuned!
* `[2024-11-26]`:🔥 The project page, code, technical report and [a basic model checkpoint](https://huggingface.co/FrancisRing/StableAnimator/tree/main) are released. Further training code, data pre-processing code, the evaluation dataset and StableAnimator-pro will be released very soon. Stay tuned!

## To-Do List
- [x] StableAnimator-basic
- [x] Inference Code
- [x] Evaluation Samples
- [x] Data Pre-Processing Code (Skeleton Extraction)
- [x] Data Pre-Processing Code (Human Face Mask Extraction)
- [ ] Evaluation Dataset
- [ ] Training Code
- [ ] StableAnimator-pro
- [ ] Inference Code with HJB-based Face Optimization

## Quickstart

The basic version of the model checkpoint supports generating videos at 576x1024 or 512x512 resolution. If you encounter insufficient-memory issues, you can reduce the number of animated frames accordingly.

### Environment setup

```
pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
pip install torch==2.5.1+cu124 xformers --index-url https://download.pytorch.org/whl/cu124
pip install -r requirements.txt
```

### Download weights
If you encounter connection issues with Hugging Face, you can use the mirror endpoint by setting the environment variable `export HF_ENDPOINT=https://hf-mirror.com`.
Please download the weights manually as follows:
```
cd StableAnimator
git lfs install
git clone https://huggingface.co/FrancisRing/StableAnimator checkpoints
```
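Alternatively, if cloning the full repository is too heavy, the weights (or a subset of them) can be fetched with `huggingface_hub`. A minimal sketch (the `allow_patterns` filter below is illustrative):
```python
# Optional alternative to the git-lfs clone above.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="FrancisRing/StableAnimator",
    local_dir="checkpoints",
    allow_patterns=["Animation/*", "DWPose/*"],  # remove to download everything
)
```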
All the weights should be organized under `checkpoints` as shown below.
The overall file structure of this project is as follows:
```
StableAnimator/
├── DWPose
├── animation
├── checkpoints
│   ├── DWPose
│   │   ├── dw-ll_ucoco_384.onnx
│   │   └── yolox_l.onnx
│   ├── Animation
│   │   ├── pose_net.pth
│   │   ├── face_encoder.pth
│   │   └── unet.pth
│   ├── SVD
│   │   ├── feature_extractor
│   │   ├── image_encoder
│   │   ├── scheduler
│   │   ├── unet
│   │   ├── vae
│   │   ├── model_index.json
│   │   ├── svd_xt.safetensors
│   │   └── svd_xt_image_decoder.safetensors
│   └── inference.zip
├── models
│   └── antelopev2
│       ├── 1k3d68.onnx
│       ├── 2d106det.onnx
│       ├── genderage.onnx
│       ├── glintr100.onnx
│       └── scrfd_10g_bnkps.onnx
├── app.py
├── command_basic_infer.sh
├── inference_basic.py
└── requirements.txt
```

### Evaluation Samples
The evaluation samples presented in the paper can be downloaded from [OneDrive](https://1drv.ms/f/c/becb962aad1a1f95/EubdzCAI7BFLhJff2LrHkt8BC9mOiwJ5V67t-ypxRnCK4Q?e=ElEmcn) or extracted from `inference.zip` in `checkpoints`. Please set them up manually as follows:
```
cd StableAnimator
mkdir inference
```
All the evaluation samples should be organized as follows:
```
inference/
├── case-1
│   ├── poses
│   ├── faces
│   └── reference.png
├── case-2
│   ├── poses
│   ├── faces
│   └── reference.png
├── case-3
│   ├── poses
│   ├── faces
│   └── reference.png
```

### Human Skeleton Extraction
We leverage the pre-trained DWPose to extract the human skeletons. When initializing DWPose, the pretrained weights should be configured in `/DWPose/dwpose_utils/wholebody.py`:
```
onnx_det = 'path/checkpoints/DWPose/yolox_l.onnx'
onnx_pose = 'path/checkpoints/DWPose/dw-ll_ucoco_384.onnx'
```
Given a target image folder containing multiple .png files, you can use the following command to obtain the corresponding human skeleton images:
```
python skeleton_extraction.py --target_image_folder_path="path/test/target_images" --ref_image_path="path/test/reference.png" --poses_folder_path="path/test/poses"
```
Note that the .png files in the target image folder are named in the format `frame_i.png`, such as `frame_0.png`, `frame_1.png`, and so on.
`--ref_image_path` refers to the path of the given reference image. The obtained human skeleton images are saved in `path/test/poses`. Importantly, the target skeleton images should be aligned with the reference image in terms of body shape.

If you only have a target MP4 file (target.mp4), we recommend using `ffmpeg` to convert it to multiple frames (.png files) without any quality loss:
```
ffmpeg -i target.mp4 -q:v 1 path/test/target_images/frame_%d.png
```
The obtained frames are saved in `path/test/target_images`.

### Human Face Mask Extraction
Given the path to an image folder containing multiple RGB `.png` files, you can run the following command to extract the corresponding human face masks:
```
python face_mask_extraction.py --image_folder="path/StableAnimator/inference/your_case/target_images"
```
`path/StableAnimator/inference/your_case/target_images` contains multiple `.png` files. The obtained masks are saved in `path/StableAnimator/inference/your_case/faces`.

### Model inference
A sample configuration for testing is provided as `command_basic_infer.sh`. You can easily modify its various options to suit your needs.

```
bash command_basic_infer.sh
```
StableAnimator supports human image animation at two resolution settings: 512x512 and 576x1024. You can modify `--width` and `--height` in `command_basic_infer.sh` to set the resolution of the animation. `--output_dir` refers to the save path of the generated animation. `--validation_control_folder` and `--validation_image` refer to the paths of the given pose sequence and the reference image, respectively.
`--pretrained_model_name_or_path` is the path of the pretrained SVD. `posenet_model_name_or_path`, `face_encoder_model_name_or_path`, and `unet_model_name_or_path` in `command_basic_infer.sh` refer to the paths of the pretrained StableAnimator weights.
If you have enough GPU resources, you can increase the value of `--decode_chunk_size` (4 => 8 => 16) in `command_basic_infer.sh` to improve the temporal smoothness of the animation.

Tip: if your GPU memory is limited, you can reduce the number of animated frames. This command generates two outputs: a frame folder <b>animated_images</b> and <b>animated_images.gif</b>.
If you want a high-quality MP4 file, we recommend running `ffmpeg` on <b>animated_images</b> as follows:
```
cd animated_images
ffmpeg -framerate 20 -i frame_%d.png -c:v libx264 -crf 10 -pix_fmt yuv420p /path/animation.mp4
```
`-framerate` sets the fps, and `-crf` controls the quality of the generated MP4 file, with smaller values corresponding to higher quality.
Additionally, you can run the following command to launch a Gradio interface:
```
python app.py
```

### VRAM requirement and Runtime

For the 15s demo video (512x512, fps=30), the 16-frame basic model requires 8GB VRAM and finishes in 5 minutes on a 4090 GPU.

The minimum VRAM requirement for the 16-frame U-Net of the pro model is 10GB (576x1024, fps=30); however, the VAE decoder demands 16GB. You have the option to run the VAE decoder on CPU.
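As a rough illustration of these trade-offs, below is a minimal sketch against the underlying diffusers SVD pipeline (StableAnimator's own inference script exposes the corresponding `--decode_chunk_size` flag; the pipeline class, paths, and values here are assumptions, not the project's exact code):
```python
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image

# Sketch only: StableAnimator wraps SVD with its own pose/face conditioning.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "checkpoints/SVD", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()  # keeps idle submodules (incl. the VAE) on CPU

image = load_image("inference/case-1/reference.png")
# A smaller decode_chunk_size lowers peak VRAM; larger values (4 => 8 => 16)
# decode more frames per VAE pass and improve temporal smoothness.
frames = pipe(image, decode_chunk_size=4).frames[0]
```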
+
## Contact
|
| 201 |
+
If you have any suggestions or find our work helpful, feel free to contact me
|
| 202 |
+
|
| 203 |
+
Email: francisshuyuan@gmail.com
|
| 204 |
+
|
| 205 |
+
If you find our work useful, <b>please consider giving a star to this github repository and citing it</b>:
|
| 206 |
+
```bib
|
| 207 |
+
@article{tu2024stableanimator,
|
| 208 |
+
title={StableAnimator: High-Quality Identity-Preserving Human Image Animation},
|
| 209 |
+
author={Shuyuan Tu and Zhen Xing and Xintong Han and Zhi-Qi Cheng and Qi Dai and Chong Luo and Zuxuan Wu},
|
| 210 |
+
journal={arXiv preprint arXiv:2411.17697},
|
| 211 |
+
year={2024}
|
| 212 |
+
}
|
| 213 |
+
```
|
assets/figures/case-17.gif
ADDED
Git LFS Details

assets/figures/case-18.gif
ADDED
Git LFS Details

assets/figures/case-24.gif
ADDED
Git LFS Details

assets/figures/case-35.gif
ADDED
Git LFS Details

assets/figures/case-42.gif
ADDED
Git LFS Details

assets/figures/case-45.gif
ADDED
Git LFS Details

assets/figures/case-46.gif
ADDED
Git LFS Details

assets/figures/case-47.gif
ADDED
Git LFS Details

assets/figures/case-5.gif
ADDED
Git LFS Details

assets/figures/case-61.gif
ADDED
Git LFS Details

assets/figures/framework.jpg
ADDED
Git LFS Details

assets/gif/case-35.gif
ADDED
Git LFS Details

assets/gif/case-42.gif
ADDED
Git LFS Details

config.json
ADDED
{
  "ddconfig": {
    "attn_resolutions": [],
    "ch": 128,
    "ch_fuse": [-1, -1, 2, 3, -1, -1],
    "ch_in": 128,
    "ch_mult": [1, 2, 2, 4, 4, 4],
    "f_t": 2,
    "fp32_attention": true,
    "fuse_mid": true,
    "fuse_type": "attn",
    "in_channels": 3,
    "num_res_blocks": 2,
    "out_ch": 3,
    "pos_embed_mode": "t",
    "resolution": 256,
    "temp_res": 16,
    "upsample_first": true,
    "use_3d_conv": true,
    "z_channels": 16
  },
  "ddconfig_2d": {
    "attn_resolutions": [],
    "ch": 128,
    "ch_mult": [1, 2, 4, 4],
    "dropout": 0.0,
    "fp32_attention": true,
    "in_channels": 3,
    "num_res_blocks": 2,
    "out_ch": 3,
    "out_z": false,
    "resolution": 256,
    "z_channels": 4
  },
  "embed_dim": 16,
  "enable_2d": true,
  "model_type": "VAE",
  "tile_overlap_factor": 0.25,
  "tile_sample_min_size": 256,
  "transformers_version": "4.39.3",
  "use_tiling": true
}

inference.zip
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:2e6638e7aa456070813786a220ed2752037342cfcd76292de08d928e3d620a39
size 87334886

models/antelopev2/.gitattributes
ADDED
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

models/antelopev2/1k3d68.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:df5c06b8a0c12e422b2ed8947b8869faa4105387f199c477af038aa01f9a45cc
size 143607619

models/antelopev2/2d106det.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:f001b856447c413801ef5c42091ed0cd516fcd21f2d6b79635b1e733a7109dbf
size 5030888

models/antelopev2/genderage.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:4fde69b1c810857b88c64a335084f1c3fe8f01246c9a191b48c7bb756d6652fb
size 1322532

models/antelopev2/glintr100.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:4ab1d6435d639628a6f3e5008dd4f929edf4c4124b1a7169e1048f9fef534cdf
size 260665334

models/antelopev2/scrfd_10g_bnkps.onnx
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5838f7fe053675b1c7a08b633df49e7af5495cee0493c7dcf6697200b85b5b91
size 16923827

stable-video-diffusion-img2vid-xt/.gitattributes
ADDED
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
output_tile.gif filter=lfs diff=lfs merge=lfs -text

stable-video-diffusion-img2vid-xt/LICENSE.md
ADDED
STABILITY AI COMMUNITY LICENSE AGREEMENT

Last Updated: July 5, 2024

1. INTRODUCTION

This Agreement applies to any individual person or entity (“You”, “Your” or “Licensee”) that uses or distributes any portion or element of the Stability AI Materials or Derivative Works thereof for any Research & Non-Commercial or Commercial purpose. Capitalized terms not otherwise defined herein are defined in Section V below.

This Agreement is intended to allow research, non-commercial, and limited commercial uses of the Models free of charge. In order to ensure that certain limited commercial uses of the Models continue to be allowed, this Agreement preserves free access to the Models for people or organizations generating annual revenue of less than US $1,000,000 (or local currency equivalent).

By clicking “I Accept” or by using or distributing or using any portion or element of the Stability Materials or Derivative Works, You agree that You have read, understood and are bound by the terms of this Agreement. If You are acting on behalf of a company, organization or other entity, then “You” includes you and that entity, and You agree that You: (i) are an authorized representative of such entity with the authority to bind such entity to this Agreement, and (ii) You agree to the terms of this Agreement on that entity’s behalf.

2. RESEARCH & NON-COMMERCIAL USE LICENSE

Subject to the terms of this Agreement, Stability AI grants You a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable and royalty-free limited license under Stability AI’s intellectual property or other rights owned by Stability AI embodied in the Stability AI Materials to use, reproduce, distribute, and create Derivative Works of, and make modifications to, the Stability AI Materials for any Research or Non-Commercial Purpose. “Research Purpose” means academic or scientific advancement, and in each case, is not primarily intended for commercial advantage or monetary compensation to You or others. “Non-Commercial Purpose” means any purpose other than a Research Purpose that is not primarily intended for commercial advantage or monetary compensation to You or others, such as personal use (i.e., hobbyist) or evaluation and testing.

3. COMMERCIAL USE LICENSE

Subject to the terms of this Agreement (including the remainder of this Section III), Stability AI grants You a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable and royalty-free limited license under Stability AI’s intellectual property or other rights owned by Stability AI embodied in the Stability AI Materials to use, reproduce, distribute, and create Derivative Works of, and make modifications to, the Stability AI Materials for any Commercial Purpose. “Commercial Purpose” means any purpose other than a Research Purpose or Non-Commercial Purpose that is primarily intended for commercial advantage or monetary compensation to You or others, including but not limited to, (i) creating, modifying, or distributing Your product or service, including via a hosted service or application programming interface, and (ii) for Your business’s or organization’s internal operations.
If You are using or distributing the Stability AI Materials for a Commercial Purpose, You must register with Stability AI at (https://stability.ai/community-license). If at any time You or Your Affiliate(s), either individually or in aggregate, generate more than USD $1,000,000 in annual revenue (or the equivalent thereof in Your local currency), regardless of whether that revenue is generated directly or indirectly from the Stability AI Materials or Derivative Works, any licenses granted to You under this Agreement shall terminate as of such date. You must request a license from Stability AI at (https://stability.ai/enterprise), which Stability AI may grant to You in its sole discretion. If you receive Stability AI Materials, or any Derivative Works thereof, from a Licensee as part of an integrated end user product, then Section III of this Agreement will not apply to you.

4. GENERAL TERMS

Your Research, Non-Commercial, and Commercial License(s) under this Agreement are subject to the following terms.
a. Distribution & Attribution. If You distribute or make available the Stability AI Materials or a Derivative Work to a third party, or a product or service that uses any portion of them, You shall: (i) provide a copy of this Agreement to that third party, (ii) retain the following attribution notice within a "Notice" text file distributed as a part of such copies: "This Stability AI Model is licensed under the Stability AI Community License, Copyright © Stability AI Ltd. All Rights Reserved”, and (iii) prominently display “Powered by Stability AI” on a related website, user interface, blogpost, about page, or product documentation. If You create a Derivative Work, You may add your own attribution notice(s) to the “Notice” text file included with that Derivative Work, provided that You clearly indicate which attributions apply to the Stability AI Materials and state in the “Notice” text file that You changed the Stability AI Materials and how it was modified.
b. Use Restrictions. Your use of the Stability AI Materials and Derivative Works, including any output or results of the Stability AI Materials or Derivative Works, must comply with applicable laws and regulations (including Trade Control Laws and equivalent regulations) and adhere to the Documentation and Stability AI’s AUP, which is hereby incorporated by reference. Furthermore, You will not use the Stability AI Materials or Derivative Works, or any output or results of the Stability AI Materials or Derivative Works, to create or improve any foundational generative AI model (excluding the Models or Derivative Works).
c. Intellectual Property.
(i) Trademark License. No trademark licenses are granted under this Agreement, and in connection with the Stability AI Materials or Derivative Works, You may not use any name or mark owned by or associated with Stability AI or any of its Affiliates, except as required under Section IV(a) herein.
(ii) Ownership of Derivative Works. As between You and Stability AI, You are the owner of Derivative Works You create, subject to Stability AI’s ownership of the Stability AI Materials and any Derivative Works made by or for Stability AI.
(iii) Ownership of Outputs. As between You and Stability AI, You own any outputs generated from the Models or Derivative Works to the extent permitted by applicable law.
(iv) Disputes. If You or Your Affiliate(s) institute litigation or other proceedings against Stability AI (including a cross-claim or counterclaim in a lawsuit) alleging that the Stability AI Materials, Derivative Works or associated outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by You, then any licenses granted to You under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Stability AI from and against any claim by any third party arising out of or related to Your use or distribution of the Stability AI Materials or Derivative Works in violation of this Agreement.
(v) Feedback. From time to time, You may provide Stability AI with verbal and/or written suggestions, comments or other feedback related to Stability AI’s existing or prospective technology, products or services (collectively, “Feedback”). You are not obligated to provide Stability AI with Feedback, but to the extent that You do, You hereby grant Stability AI a perpetual, irrevocable, royalty-free, fully-paid, sub-licensable, transferable, non-exclusive, worldwide right and license to exploit the Feedback in any manner without restriction. Your Feedback is provided “AS IS” and You make no warranties whatsoever about any Feedback.
d. Disclaimer Of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE STABILITY AI MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OR LAWFULNESS OF USING OR REDISTRIBUTING THE STABILITY AI MATERIALS, DERIVATIVE WORKS OR ANY OUTPUT OR RESULTS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE STABILITY AI MATERIALS, DERIVATIVE WORKS AND ANY OUTPUT AND RESULTS.
e. Limitation Of Liability. IN NO EVENT WILL STABILITY AI OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF STABILITY AI OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
f. Term And Termination. The term of this Agreement will commence upon Your acceptance of this Agreement or access to the Stability AI Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Stability AI may terminate this Agreement if You are in breach of any term or condition of this Agreement. Upon termination of this Agreement, You shall delete and cease use of any Stability AI Materials or Derivative Works. Section IV(d), (e), and (g) shall survive the termination of this Agreement.
g. Governing Law. This Agreement will be governed by and constructed in accordance with the laws of the United States and the State of California without regard to choice of law principles, and the UN Convention on Contracts for International Sale of Goods does not apply to this Agreement.

5. DEFINITIONS

“Affiliate(s)” means any entity that directly or indirectly controls, is controlled by, or is under common control with the subject entity; for purposes of this definition, “control” means direct or indirect ownership or control of more than 50% of the voting interests of the subject entity.

"Agreement" means this Stability AI Community License Agreement.

“AUP” means the Stability AI Acceptable Use Policy available at (https://stability.ai/use-policy), as may be updated from time to time.

"Derivative Work(s)” means (a) any derivative work of the Stability AI Materials as recognized by U.S. copyright laws and (b) any modifications to a Model, and any other model created which is based on or derived from the Model or the Model’s output, including “fine tune” and “low-rank adaptation” models derived from a Model or a Model’s output, but do not include the output of any Model.

“Documentation” means any specifications, manuals, documentation, and other written information provided by Stability AI related to the Software or Models.

“Model(s)" means, collectively, Stability AI’s proprietary models and algorithms, including machine-learning models, trained model weights and other elements of the foregoing listed on Stability’s Core Models Webpage available at (https://stability.ai/core-models), as may be updated from time to time.

"Stability AI" or "we" means Stability AI Ltd. and its Affiliates.

"Software" means Stability AI’s proprietary software made available under this Agreement now or in the future.

“Stability AI Materials” means, collectively, Stability’s proprietary Models, Software and Documentation (and any portion or combination thereof) made available under this Agreement.

“Trade Control Laws” means any applicable U.S. and non-U.S. export control and trade sanctions laws and regulations.

stable-video-diffusion-img2vid-xt/README.md
ADDED
---
pipeline_tag: image-to-video
license: other
license_name: stable-video-diffusion-community
license_link: LICENSE.md
---

# Stable Video Diffusion Image-to-Video Model Card

<!-- Provide a quick summary of what the model is/does. -->
![row01](output_tile.gif)
Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.

Please note: For commercial use, please refer to https://stability.ai/license.

## Model Details

### Model Description

(SVD) Image-to-Video is a latent diffusion model trained to generate short video clips from an image conditioning.
This model was trained to generate 25 frames at resolution 576x1024 given a context frame of the same size, finetuned from [SVD Image-to-Video [14 frames]](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid).
We also finetune the widely used [f8-decoder](https://huggingface.co/docs/diffusers/api/models/autoencoderkl#loading-from-the-original-format) for temporal consistency.
For convenience, we additionally provide the model with the standard frame-wise decoder [here](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/svd_xt_image_decoder.safetensors).


- **Developed by:** Stability AI
- **Funded by:** Stability AI
- **Model type:** Generative image-to-video model
- **Finetuned from model:** SVD Image-to-Video [14 frames]

### Model Sources

For research purposes, we recommend our `generative-models` GitHub repository (https://github.com/Stability-AI/generative-models), which implements the most popular diffusion frameworks (both training and inference).

- **Repository:** https://github.com/Stability-AI/generative-models
- **Paper:** https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets


## Evaluation
![comparison](comparison.png)
The chart above evaluates user preference for SVD-Image-to-Video over [GEN-2](https://research.runwayml.com/gen2) and [PikaLabs](https://www.pika.art/).
SVD-Image-to-Video is preferred by human voters in terms of video quality. For details on the user study, we refer to the [research paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets).

## Uses

### Direct Use

The model is intended for both non-commercial and commercial usage. You can use this model for non-commercial or research purposes under this [license](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE.md). Possible research areas and tasks include

- Research on generative models.
- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.

For commercial use, please refer to https://stability.ai/license.

Excluded uses are described below.

### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
The model should not be used in any way that violates Stability AI's [Acceptable Use Policy](https://stability.ai/use-policy).

## Limitations and Bias

### Limitations
- The generated videos are rather short (<= 4sec), and the model does not achieve perfect photorealism.
- The model may generate videos without motion, or very slow camera pans.
- The model cannot be controlled through text.
- The model cannot render legible text.
- Faces and people in general may not be generated properly.
- The autoencoding part of the model is lossy.


### Recommendations

The model is intended for both non-commercial and commercial usage.

## How to Get Started with the Model

Check out https://github.com/Stability-AI/generative-models
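For a quick diffusers-based start, a minimal sketch following the standard diffusers SVD usage (the input frame path, seed, and fps are placeholders):
```python
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.enable_model_cpu_offload()  # reduces peak VRAM

image = load_image("conditioning_frame.png")  # placeholder conditioning frame
generator = torch.manual_seed(42)
frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
export_to_video(frames, "generated.mp4", fps=7)
```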
+
# Appendix:
|
| 88 |
+
|
| 89 |
+
All considered potential data sources were included for final training, with none held out as the proposed data filtering methods described in the SVD paper handle the quality control/filtering of the dataset. With regards to safety/NSFW filtering, sources considered were either deemed safe or filtered with the in-house NSFW filters.
|
| 90 |
+
No explicit human labor is involved in training data preparation. However, human evaluation for model outputs and quality was extensively used to evaluate model quality and performance. The evaluations were performed with third-party contractor platforms (Amazon Sagemaker, Amazon Mechanical Turk, Prolific) with fluent English-speaking contractors from various countries, primarily from the USA, UK, and Canada. Each worker was paid $12/hr for the time invested in the evaluation.
|
| 91 |
+
No other third party was involved in the development of this model; the model was fully developed in-house at Stability AI.
|
| 92 |
+
Training the SVD checkpoints required a total of approximately 200,000 A100 80GB hours. The majority of the training occurred on 48 * 8 A100s, while some stages took more/less than that. The resulting CO2 emission is ~19,000kg CO2 eq., and energy consumed is ~64000 kWh.
|
| 93 |
+
The released checkpoints (SVD/SVD-XT) are image-to-video models that generate short videos/animations closely following the given input image. Since the model relies on an existing supplied image, the potential risks of disclosing specific material or novel unsafe content are minimal. This was also evaluated by third-party independent red-teaming services, which agree with our conclusion to a high degree of confidence (>90% in various areas of safety red-teaming). The external evaluations were also performed for trustworthiness, leading to >95% confidence in real, trustworthy videos.
|
| 94 |
+
With the default settings at the time of release, SVD takes ~100s for generation, and SVD-XT takes ~180s on an A100 80GB card. Several optimizations to trade off quality / memory / speed can be done to perform faster inference or inference on lower VRAM cards.
|
| 95 |
+
The information related to the model and its development process and usage protocols can be found in the GitHub repo, associated research paper, and HuggingFace model page/cards.
|
| 96 |
+
The released model inference & demo code has image-level watermarking enabled by default, which can be used to detect the outputs. This is done via the imWatermark Python library.
|
| 97 |
+
The model can be used to generate videos from static initial images. However, we prohibit unlawful, obscene, or misleading uses of the model consistent with the terms of our license and Acceptable Use Policy. For the open-weights release, our training data filtering mitigations alleviate this risk to some extent. These restrictions are explicitly enforced on user-facing interfaces at stablevideo.com, where a warning is issued. We do not take any responsibility for third-party interfaces. Submitting initial images that bypass input filters to tease out offensive or inappropriate content listed above is also prohibited. Safety filtering checks at stablevideo.com run on model inputs and outputs independently. More details on our user-facing interfaces can be found here: https://www.stablevideo.com/faq. Beyond the Acceptable Use Policy and other mitigations and conditions described here, the model is not subject to additional model behavior interventions of the type described in the Foundation Model Transparency Index.
|
| 98 |
+
For stablevideo.com, we store preference data in the form of upvotes/downvotes on user-generated videos, and we have a pairwise ranker that runs while a user generates videos. This usage data is solely used for improving Stability AI’s future image/video models and services. No other third-party entities are given access to the usage data beyond Stability AI and maintainers of stablevideo.com.
|
| 99 |
+
For usage statistics of SVD, we refer interested users to HuggingFace model download/usage statistics as a primary indicator. Third-party applications also have reported model usage statistics. We might also consider releasing aggregate usage statistics of stablevideo.com on reaching some milestones.
|
stable-video-diffusion-img2vid-xt/comparison.png
ADDED
Git LFS Details

stable-video-diffusion-img2vid-xt/feature_extractor/preprocessor_config.json
ADDED
{
  "crop_size": {
    "height": 224,
    "width": 224
  },
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [0.48145466, 0.4578275, 0.40821073],
  "image_processor_type": "CLIPImageProcessor",
  "image_std": [0.26862954, 0.26130258, 0.27577711],
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 224
  }
}
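This config corresponds to standard CLIP preprocessing (224x224 center crop, CLIP mean/std). A minimal sketch of loading and applying it directly, assuming a local clone at this path:
```python
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(
    "stable-video-diffusion-img2vid-xt/feature_extractor"  # local path assumption
)
image = Image.open("reference.png").convert("RGB")  # placeholder image
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```
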
stable-video-diffusion-img2vid-xt/image_encoder/config.json
ADDED
{
  "_name_or_path": "/home/suraj_huggingface_co/.cache/huggingface/hub/models--diffusers--svd-xt/snapshots/9703ded20c957c340781ee710b75660826deb487/image_encoder",
  "architectures": [
    "CLIPVisionModelWithProjection"
  ],
  "attention_dropout": 0.0,
  "dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_size": 1280,
  "image_size": 224,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "model_type": "clip_vision_model",
  "num_attention_heads": 16,
  "num_channels": 3,
  "num_hidden_layers": 32,
  "patch_size": 14,
  "projection_dim": 1024,
  "torch_dtype": "float16",
  "transformers_version": "4.34.0.dev0"
}

stable-video-diffusion-img2vid-xt/image_encoder/model.fp16.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:ae616c24393dd1854372b0639e5541666f7521cbe219669255e865cb7f89466a
size 1264217240

stable-video-diffusion-img2vid-xt/image_encoder/model.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:ed1e5af7b4042ca30ec29999a4a5cfcac90b7fb610fd05ace834f2dcbb763eab
size 2528371296

stable-video-diffusion-img2vid-xt/model_index.json
ADDED
{
  "_class_name": "StableVideoDiffusionPipeline",
  "_diffusers_version": "0.24.0.dev0",
  "_name_or_path": "diffusers/svd-xt",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "image_encoder": [
    "transformers",
    "CLIPVisionModelWithProjection"
  ],
  "scheduler": [
    "diffusers",
    "EulerDiscreteScheduler"
  ],
  "unet": [
    "diffusers",
    "UNetSpatioTemporalConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKLTemporalDecoder"
  ]
}
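`model_index.json` tells diffusers which library and class to instantiate for each subfolder when the pipeline is loaded. Individual components can also be loaded on their own; a sketch, assuming a local clone at this path (the U-Net ships only fp16 weights here, hence `variant="fp16"`):
```python
from diffusers import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel

unet = UNetSpatioTemporalConditionModel.from_pretrained(
    "stable-video-diffusion-img2vid-xt", subfolder="unet", variant="fp16"
)
vae = AutoencoderKLTemporalDecoder.from_pretrained(
    "stable-video-diffusion-img2vid-xt", subfolder="vae"
)
```
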
stable-video-diffusion-img2vid-xt/output_tile.gif
ADDED
Git LFS Details

stable-video-diffusion-img2vid-xt/scheduler/scheduler_config.json
ADDED
{
  "_class_name": "EulerDiscreteScheduler",
  "_diffusers_version": "0.24.0.dev0",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "clip_sample": false,
  "interpolation_type": "linear",
  "num_train_timesteps": 1000,
  "prediction_type": "v_prediction",
  "set_alpha_to_one": false,
  "sigma_max": 700.0,
  "sigma_min": 0.002,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "timestep_spacing": "leading",
  "timestep_type": "continuous",
  "trained_betas": null,
  "use_karras_sigmas": true
}
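The scheduler is rebuilt from this config at pipeline load time; it can also be instantiated standalone (a sketch, assuming a local clone at this path):
```python
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "stable-video-diffusion-img2vid-xt", subfolder="scheduler"
)
print(scheduler.config.sigma_max)  # 700.0, as in the config above
```
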
stable-video-diffusion-img2vid-xt/unet/config.json
ADDED
{
  "_class_name": "UNetSpatioTemporalConditionModel",
  "_diffusers_version": "0.24.0.dev0",
  "_name_or_path": "/home/suraj_huggingface_co/.cache/huggingface/hub/models--diffusers--svd-xt/snapshots/9703ded20c957c340781ee710b75660826deb487/unet",
  "addition_time_embed_dim": 256,
  "block_out_channels": [320, 640, 1280, 1280],
  "cross_attention_dim": 1024,
  "down_block_types": [
    "CrossAttnDownBlockSpatioTemporal",
    "CrossAttnDownBlockSpatioTemporal",
    "CrossAttnDownBlockSpatioTemporal",
    "DownBlockSpatioTemporal"
  ],
  "in_channels": 8,
  "layers_per_block": 2,
  "num_attention_heads": [5, 10, 20, 20],
  "num_frames": 25,
  "out_channels": 4,
  "projection_class_embeddings_input_dim": 768,
  "sample_size": 96,
  "transformer_layers_per_block": 1,
  "up_block_types": [
    "UpBlockSpatioTemporal",
    "CrossAttnUpBlockSpatioTemporal",
    "CrossAttnUpBlockSpatioTemporal",
    "CrossAttnUpBlockSpatioTemporal"
  ]
}

stable-video-diffusion-img2vid-xt/unet/diffusion_pytorch_model.fp16.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:9fbc02e90f37d422f5e3a4aeaee95f6629dc8c45ca211b951626e930daf2bddf
size 3049435868

stable-video-diffusion-img2vid-xt/vae/config.json
ADDED
{
  "_class_name": "AutoencoderKLTemporalDecoder",
  "_diffusers_version": "0.24.0.dev0",
  "_name_or_path": "/home/suraj_huggingface_co/.cache/huggingface/hub/models--diffusers--svd-xt/snapshots/9703ded20c957c340781ee710b75660826deb487/vae",
  "block_out_channels": [128, 256, 512, 512],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "force_upcast": true,
  "in_channels": 3,
  "latent_channels": 4,
  "layers_per_block": 2,
  "out_channels": 3,
  "sample_size": 768,
  "scaling_factor": 0.18215
}

stable-video-diffusion-img2vid-xt/vae/diffusion_pytorch_model.fp16.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:af602cd0eb4ad6086ec94fbf1438dfb1be5ec9ac03fd0215640854e90d6463a3
size 195531910

stable-video-diffusion-img2vid-xt/vae/diffusion_pytorch_model.safetensors
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5d92aa595a53d9da9faf594f09910ee869d5d567c8bb0362d5095673c69997d6
size 391017740