Diffusers
Safetensors
bodhisativa committed on
Commit
5d611d7
·
verified ·
1 Parent(s): 43ee85e

Add files using upload-large-folder tool

Browse files
.gitattributes CHANGED
@@ -1,35 +1,35 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+ <h1 align='center'>EchoMimicV3: 1.3B Parameters are All You Need for Unified Multi-Modal and Multi-Task Human Animation</h1>
5
+
6
+ <div align='center'>
7
+ <a href='https://github.com/mengrang' target='_blank'>Rang Meng</a><sup>1</sup>&emsp;
8
+ <a href='https://github.com/' target='_blank'>Yan Wang</a>&emsp;
9
+ <a href='https://github.com/' target='_blank'>Weipeng Wu</a>&emsp;
10
+ <a href='https://github.com/' target='_blank'>Ruobing Zheng</a>&emsp;
11
+ <a href='https://lymhust.github.io/' target='_blank'>Yuming Li</a><sup>2</sup>&emsp;
12
+ <a href='https://openreview.net/profile?id=~Chenguang_Ma3' target='_blank'>Chenguang Ma</a><sup>2</sup>
13
+ </div>
14
+ <div align='center'>
15
+ Terminal Technology Department, Alipay, Ant Group.
16
+ </div>
17
+ <p align='center'>
18
+ <sup>1</sup>Core Contributor&emsp;
19
+ <sup>2</sup>Corresponding Authors
20
+ </p>
21
+ <div align='center'>
22
+ <a href='https://antgroup.github.io/ai/echomimic_v3/'><img src='https://img.shields.io/badge/Project-Page-blue'></a>
23
+ <!-- <a href='https://huggingface.co/BadToBest/EchoMimicV3'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20HuggingFace-Model-yellow'></a> -->
24
+ <!--<a href='https://antgroup.github.io/ai/echomimic_v2/'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20HuggingFace-Demo-yellow'></a>-->
25
+ <!-- <a href='https://modelscope.cn/models/BadToBest/EchoMimicV3'><img src='https://img.shields.io/badge/ModelScope-Model-purple'></a> -->
26
+ <!--<a href='https://antgroup.github.io/ai/echomimic_v2/'><img src='https://img.shields.io/badge/ModelScope-Demo-purple'></a>-->
27
+ <a href='https://arxiv.org/abs/2507.03905'><img src='https://img.shields.io/badge/Paper-Arxiv-red'></a>
28
+ <!-- <a href='https://openaccess.thecvf.com/content/CVPR2025/papers/Meng_EchoMimicV2_Towards_Striking_Simplified_and_Semi-Body_Human_Animation_CVPR_2025_paper.pdf'><img src='https://img.shields.io/badge/Paper-CVPR2025-blue'></a> -->
29
+ <!-- <a href='https://github.com/antgroup/echomimic_v2/blob/main/assets/halfbody_demo/wechat_group.png'><img src='https://badges.aleen42.com/src/wechat.svg'></a> -->
30
+ </div>
31
+ <!-- <div align='center'>
32
+ <a href='https://github.com/antgroup/echomimic_v3/discussions/0'><img src='https://img.shields.io/badge/English-Common Problems-orange'></a>
33
+ <a href='https://github.com/antgroup/echomimic_v3/discussions/1'><img src='https://img.shields.io/badge/中文版-常见问题汇总-orange'></a>
34
+ </div> -->
35
+
36
+ ## &#x1F680; EchoMimic Series
37
+ * EchoMimicV3: 1.3B Parameters are All You Need for Unified Multi-Modal and Multi-Task Human Animation. [GitHub](https://github.com/antgroup/echomimic_v3)
38
+ * EchoMimicV2: Towards Striking, Simplified, and Semi-Body Human Animation. [GitHub](https://github.com/antgroup/echomimic_v2)
39
+ * EchoMimicV1: Lifelike Audio-Driven Portrait Animations through Editable Landmark Conditioning. [GitHub](https://github.com/antgroup/echomimic)
40
+
41
+
42
+ ## &#x1F4E3; Updates
43
+ <!-- * [2025.02.27] 🔥 EchoMimicV2 is accepted by CVPR 2025.
44
+ * [2025.01.16] 🔥 Please check out the [discussions](https://github.com/antgroup/echomimic_v2/discussions) to learn how to start EchoMimicV2.
45
+ * [2025.01.16] 🚀🔥 [GradioUI for Accelerated EchoMimicV2](https://github.com/antgroup/echomimic_v2/blob/main/app_acc.py) is now available.
46
+ * [2025.01.03] 🚀🔥 **One Minute is All You Need to Generate Video**. [Accelerated EchoMimicV2](https://github.com/antgroup/echomimic_v2/blob/main/infer_acc.py) are released. The inference speed can be improved by 9x (from ~7mins/120frames to ~50s/120frames on A100 GPU).
47
+ * [2024.12.16] 🔥 [RefImg-Pose Alignment Demo](https://github.com/antgroup/echomimic_v2/blob/main/demo.ipynb) is now available, which involves aligning reference image, extracting pose from driving video, and generating video.
48
+ * [2024.11.27] 🔥 [Installation tutorial](https://www.youtube.com/watch?v=2ab6U1-nVTQ) is now available. Thanks [AiMotionStudio](https://www.youtube.com/@AiMotionStudio) for the contribution.
49
+ * [2024.11.22] 🔥 [GradioUI](https://github.com/antgroup/echomimic_v2/blob/main/app.py) is now available. Thanks @gluttony-10 for the contribution.
50
+ * [2024.11.22] 🔥 [ComfyUI](https://github.com/smthemex/ComfyUI_EchoMimic) is now available. Thanks @smthemex for the contribution.
51
+ * [2024.11.21] 🔥 We release the EMTD dataset list and processing scripts.
52
+ * [2024.11.21] 🔥 We release our [EchoMimicV2](https://github.com/antgroup/echomimic_v2) codes and models. -->
53
+ <!-- * [2025.08.08] 🔥 We release our [codes](https://arxiv.org/abs/2507.03905). -->
54
+ * [2025.07.08] 🔥 Our [paper](https://arxiv.org/abs/2507.03905) is in public on arxiv.
55
+
56
+ ## &#x1F305; Gallery
57
+ <p align="center">
58
+ <img src="asset/echomimicv3.jpg" height=700>
59
+ </p>
60
+ <table class="center">
61
+ <tr>
62
+ <td width=100% style="border: none">
63
+ <video controls loop src="https://github.com/user-attachments/assets/f33edb30-66b1-484b-8be0-a5df20a44f3b" muted="false"></video>
64
+ </td>
65
+ </tr>
66
+ </table>
67
+ For more demo videos, please refer to the project page.
68
+
69
+ ## Quick Start
70
+ ### Environment Setup
71
+ - Tested System Environment: CentOS 7.2/Ubuntu 22.04, CUDA >= 12.1
72
+ - Tested GPUs: A100(80G) / RTX4090D (24G) / V100(16G)
73
+ - Tested Python Version: 3.10 / 3.11
74
+
75
+ ### 🛠️Installation
76
+ #### 1. Create a conda environment and install pytorch, xformers
77
+ ```
78
+ conda create -n echomimic_v3 python=3.10
79
+ conda activate echomimic_v3
80
+ ```
81
+
82
+ #### 2. Other dependencies
83
+ ```
84
+ pip install -r requirements.txt
85
+ ```
86
+ ### 🧱Model Preparation
87
+
88
+ | Models | Download Link | Notes |
89
+ | --------------|-------------------------------------------------------------------------------|-------------------------------|
90
+ | Wan2.1-Fun-1.3B-InP | 🤗 [Huggingface](https://huggingface.co/spaces/alibaba-pai/Wan2.1-Fun-1.3B-InP) | Base model
91
+ | wav2vec2-base | 🤗 [Huggingface](https://huggingface.co/facebook/wav2vec2-base-960h) | Audio encoder
92
+ | EchoMimicV3 | 🤗 [Huggingface](https://huggingface.co/BadToBest/EchoMimicV3) | Our weights
93
+
94
+ The **weights** are organized as follows.
95
+
96
+ ```
97
+ ./models/
98
+ ├── Wan2.1-Fun-1.3B-InP
99
+ ├── wav2vec2-base-960h
100
+ └── transformer
101
+     └── diffusion_pytorch_model.safetensors
+ ```
102
+ ### 🔑 Quick Inference
103
+ ```
104
+ python infer.py
105
+ ```
106
+ > Tips
107
+ > - Audio CFG: Audio CFG works optimally between 2~3. Increase the audio CFG value for better lip synchronization, while decreasing the audio CFG value can improve the visual quality.
108
+ > - Text CFG: Text CFG works optimally between 4~6. Increase the text CFG value for better prompt following, while decreasing the text CFG value can improve the visual quality.
109
+ > - TeaCache: The optimal range for `--teacache_thresh` is between 0~0.1.
110
+ > - Sampling steps: 5 steps for talking head, 15~25 steps for talking body.
111
+ > - Long video generation: If you want to generate a video longer than 138 frames, you can use Long Video CFG.
112
+
113
+
114
+ ## 📝 TODO List
115
+ | Status | Milestone |
116
+ |:--------:|:-------------------------------------------------------------------------|
117
| 2025.08.08 | The inference code of EchoMimicV3 meets everyone on GitHub |
118
+ | 🚀 | Preview version Pretrained models trained on English and Chinese on HuggingFace |
119
+ | 🚀 | Preview version Pretrained models trained on English and Chinese on ModelScope |
120
+ | 🚀 | 720P Pretrained models trained on English and Chinese on HuggingFace |
121
+ | 🚀 | 720P Pretrained models trained on English and Chinese on ModelScope |
122
+ | 🚀 | The training code of EchoMimicV3 meets everyone on GitHub |
123
+
124
+
125
+
126
+ ## &#x1F4D2; Citation
127
+
128
+ If you find our work useful for your research, please consider citing the paper:
129
+
130
+ ```
131
+ @misc{meng2025echomimicv3,
132
+ title={EchoMimicV3: 1.3B Parameters are All You Need for Unified Multi-Modal and Multi-Task Human Animation},
133
+ author={Rang Meng and Yan Wang and Weipeng Wu and Ruobing Zheng and Yuming Li and Chenguang Ma},
134
+ year={2025},
135
+ eprint={2507.03905},
136
+ archivePrefix={arXiv}
137
+ }
138
+ ```
139
+
140
+ ## &#x1F31F; Star History
141
+ [![Star History Chart](https://api.star-history.com/svg?repos=antgroup/echomimic_v3&type=Date)](https://star-history.com/#antgroup/echomimic_v3&Date)
echomimicv3-flash-pro/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "WanTransformerAudioMask3DModel",
3
+ "_diffusers_version": "0.31.0",
4
+ "add_control_adapter": false,
5
+ "add_ref_conv": false,
6
+ "cross_attn_norm": true,
7
+ "dim": 1536,
8
+ "eps": 1e-06,
9
+ "ffn_dim": 8960,
10
+ "freq_dim": 256,
11
+ "hidden_size": 1536,
12
+ "in_channels": 36,
13
+ "in_dim": 36,
14
+ "in_dim_control_adapter": 24,
15
+ "in_dim_ref_conv": 16,
16
+ "model_type": "i2v",
17
+ "num_heads": 12,
18
+ "num_layers": 30,
19
+ "out_dim": 16,
20
+ "patch_size": [
21
+ 1,
22
+ 2,
23
+ 2
24
+ ],
25
+ "qk_norm": true,
26
+ "text_dim": 4096,
27
+ "text_len": 512,
28
+ "window_size": [
29
+ -1,
30
+ -1
31
+ ]
32
+ }
echomimicv3-flash-pro/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ebdbb2fc709108bf2a1728fd92eb2874804e4bc0324e92a2cd55425968c85a4
3
+ size 3727671120
transformer/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "WanTransformerAudioMask3DModel",
3
+ "_diffusers_version": "0.32.2",
4
+ "add_control_adapter": false,
5
+ "add_ref_conv": false,
6
+ "cross_attn_norm": true,
7
+ "dim": 1536,
8
+ "eps": 1e-06,
9
+ "ffn_dim": 8960,
10
+ "freq_dim": 256,
11
+ "hidden_size": 1536,
12
+ "in_channels": 36,
13
+ "in_dim": 36,
14
+ "in_dim_control_adapter": 24,
15
+ "in_dim_ref_conv": 16,
16
+ "model_type": "i2v",
17
+ "num_heads": 12,
18
+ "num_layers": 30,
19
+ "out_dim": 16,
20
+ "patch_size": [
21
+ 1,
22
+ 2,
23
+ 2
24
+ ],
25
+ "qk_norm": true,
26
+ "text_dim": 4096,
27
+ "text_len": 512,
28
+ "window_size": [
29
+ -1,
30
+ -1
31
+ ]
32
+ }
transformer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba25a75c7511ba57a346480d50567b57270282c1f3ab121558c07b65a97adf1f
3
+ size 3414541616