sunseeker001 cleardusk committed on
Commit
c577da8
·
0 Parent(s):

Duplicate from KlingTeam/LivePortrait

Browse files

Co-authored-by: Jianzhu Guo <cleardusk@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ liveportrait/retargeting_models/stitching_retargeting_module.pth filter=lfs diff=lfs merge=lfs -text
37
+ liveportrait/base_models/appearance_feature_extractor.pth filter=lfs diff=lfs merge=lfs -text
38
+ liveportrait/base_models/motion_extractor.pth filter=lfs diff=lfs merge=lfs -text
39
+ liveportrait/base_models/spade_generator.pth filter=lfs diff=lfs merge=lfs -text
40
+ liveportrait/base_models/warping_module.pth filter=lfs diff=lfs merge=lfs -text
41
+ insightface/models/buffalo_l/2d106det.onnx filter=lfs diff=lfs merge=lfs -text
42
+ insightface/models/buffalo_l/det_10g.onnx filter=lfs diff=lfs merge=lfs -text
43
+ liveportrait/landmark.onnx filter=lfs diff=lfs merge=lfs -text
44
+ docs/inference.gif filter=lfs diff=lfs merge=lfs -text
45
+ docs/showcase2.gif filter=lfs diff=lfs merge=lfs -text
46
+ liveportrait_animals/base_models/motion_extractor.pth filter=lfs diff=lfs merge=lfs -text
47
+ liveportrait_animals/base_models/spade_generator.pth filter=lfs diff=lfs merge=lfs -text
48
+ liveportrait_animals/base_models/warping_module.pth filter=lfs diff=lfs merge=lfs -text
49
+ liveportrait_animals/retargeting_models/stitching_retargeting_module.pth filter=lfs diff=lfs merge=lfs -text
50
+ liveportrait_animals/xpose.pth filter=lfs diff=lfs merge=lfs -text
51
+ liveportrait_animals/base_models/appearance_feature_extractor.pth filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ **/__pycache__/
4
+ *.py[cod]
5
+ **/*.py[cod]
6
+ *$py.class
7
+
8
+ # Model weights
9
+ #**/*.pth
10
+ #**/*.onnx
11
+
12
+ # Ipython notebook
13
+ *.ipynb
14
+
15
+ # Temporary files or benchmark resources
16
+ animations/*
17
+ tmp/*
18
+ gradio_cached_examples/
.gitkeep ADDED
File without changes
README.md ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ library_name: liveportrait
4
+ pipeline_tag: image-to-video
5
+ ---
6
+
7
+ <h1 align="center">LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control</h1>
8
+
9
+ <div align='center'>
10
+ <a href='https://github.com/cleardusk' target='_blank'><strong>Jianzhu Guo</strong></a><sup> 1*†</sup>&emsp;
11
+ <a href='https://github.com/Mystery099' target='_blank'><strong>Dingyun Zhang</strong></a><sup> 1,2*</sup>&emsp;
12
+ <a href='https://github.com/KwaiVGI' target='_blank'><strong>Xiaoqiang Liu</strong></a><sup> 1</sup>&emsp;
13
+ <a href='https://github.com/zzzweakman' target='_blank'><strong>Zhizhou Zhong</strong></a><sup> 1,3</sup>&emsp;
14
+ <a href='https://scholar.google.com.hk/citations?user=_8k1ubAAAAAJ' target='_blank'><strong>Yuan Zhang</strong></a><sup> 1</sup>&emsp;
15
+ </div>
16
+
17
+ <div align='center'>
18
+ <a href='https://scholar.google.com/citations?user=P6MraaYAAAAJ' target='_blank'><strong>Pengfei Wan</strong></a><sup> 1</sup>&emsp;
19
+ <a href='https://openreview.net/profile?id=~Di_ZHANG3' target='_blank'><strong>Di Zhang</strong></a><sup> 1</sup>&emsp;
20
+ </div>
21
+
22
+ <div align='center'>
23
+ <sup>1 </sup>Kuaishou Technology&emsp; <sup>2 </sup>University of Science and Technology of China&emsp; <sup>3 </sup>Fudan University&emsp;
24
+ </div>
25
+ <div align='center'>
26
+ <small><sup>*</sup> Equal contributions</small>
27
+ <small><sup>†</sup> Corresponding author</small>
28
+ </div>
29
+
30
+ <div align="center" style="display: flex; justify-content: center; flex-wrap: wrap;">
31
+ <!-- <a href='LICENSE'><img src='https://img.shields.io/badge/license-MIT-yellow'></a> -->
32
+ <a href='https://arxiv.org/pdf/2407.03168'><img src='https://img.shields.io/badge/arXiv-LivePortrait-red'></a>
33
+ <a href='https://liveportrait.github.io'><img src='https://img.shields.io/badge/Project-LivePortrait-green'></a>
34
+ <a href='https://huggingface.co/spaces/KwaiVGI/liveportrait'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
35
+ <a href="https://github.com/KwaiVGI/LivePortrait"><img src="https://img.shields.io/github/stars/KwaiVGI/LivePortrait"></a>
36
+ </div>
37
+ <br>
38
+
39
+ <p align="center">
40
+ <img src="./docs/showcase2.gif" alt="showcase">
41
+ 🔥 For more results, visit our <a href="https://liveportrait.github.io/"><strong>homepage</strong></a> 🔥
42
+ </p>
43
+
44
+
45
+
46
+ ## 🔥 Updates
47
+ - **`2024/08/02`**: 😸 We released a version of the **Animals model**, along with several other updates and improvements. Check out the details [**here**](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-08-02.md)!
48
+ - **`2024/07/25`**: 📦 Windows users can now download the package from [HuggingFace](https://huggingface.co/cleardusk/LivePortrait-Windows/tree/main) or [BaiduYun](https://pan.baidu.com/s/1FWsWqKe0eNfXrwjEhhCqlw?pwd=86q2). Simply unzip and double-click `run_windows.bat` to enjoy!
49
+ - **`2024/07/24`**: 🎨 We support pose editing for source portraits in the Gradio interface. We’ve also lowered the default detection threshold to increase recall. [Have fun](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-24.md)!
50
+ - **`2024/07/19`**: ✨ We support 🎞️ portrait video editing (aka v2v)! More to see [here](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-19.md).
51
+ - **`2024/07/17`**: 🍎 We support macOS with Apple Silicon, modified from [jeethu](https://github.com/jeethu)'s PR [#143](https://github.com/KwaiVGI/LivePortrait/pull/143).
52
+ - **`2024/07/10`**: 💪 We support audio and video concatenating, driving video auto-cropping, and template making to protect privacy. More to see [here](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-10.md).
53
+ - **`2024/07/09`**: 🤗 We released the [HuggingFace Space](https://huggingface.co/spaces/KwaiVGI/liveportrait), thanks to the HF team and [Gradio](https://github.com/gradio-app/gradio)!
54
+ - **`2024/07/04`**: 😊 We released the initial version of the inference code and models. Continuous updates, stay tuned!
55
+ - **`2024/07/04`**: 🔥 We released the [homepage](https://liveportrait.github.io) and technical report on [arXiv](https://arxiv.org/pdf/2407.03168).
56
+
57
+
58
+ ## Introduction 📖
59
+ This repo, named **LivePortrait**, contains the official PyTorch implementation of our paper [LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control](https://arxiv.org/pdf/2407.03168).
60
+ We are actively updating and improving this repository. If you find any bugs or have suggestions, welcome to raise issues or submit pull requests (PR) 💖.
61
+
62
+ ## Getting Started 🏁
63
+ ### 1. Clone the code and prepare the environment
64
+ ```bash
65
+ git clone https://github.com/KwaiVGI/LivePortrait
66
+ cd LivePortrait
67
+
68
+ # create env using conda
69
+ conda create -n LivePortrait python==3.9
70
+ conda activate LivePortrait
71
+
72
+ # install dependencies with pip
73
+ # for Linux and Windows users
74
+ pip install -r requirements.txt
75
+ # for macOS with Apple Silicon users
76
+ pip install -r requirements_macOS.txt
77
+ ```
78
+
79
+ **Note:** make sure your system has [FFmpeg](https://ffmpeg.org/download.html) installed, including both `ffmpeg` and `ffprobe`!
80
+
81
+ ### 2. Download pretrained weights
82
+
83
+ The easiest way to download the pretrained weights is from HuggingFace:
84
+ ```bash
85
+ # first, ensure git-lfs is installed, see: https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage
86
+ git lfs install
87
+ # clone and move the weights
88
+ git clone https://huggingface.co/KwaiVGI/LivePortrait temp_pretrained_weights
89
+ mv temp_pretrained_weights/* pretrained_weights/
90
+ rm -rf temp_pretrained_weights
91
+ ```
92
+
93
+ Alternatively, you can download all pretrained weights from [Google Drive](https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib) or [Baidu Yun](https://pan.baidu.com/s/1MGctWmNla_vZxDbEp2Dtzw?pwd=z5cn). Unzip and place them in `./pretrained_weights`.
94
+
95
+ Ensure the directory structure is as follows, or contains:
96
+ ```text
97
+ pretrained_weights
98
+ ├── insightface
99
+ │ └── models
100
+ │ └── buffalo_l
101
+ │ ├── 2d106det.onnx
102
+ │ └── det_10g.onnx
103
+ └── liveportrait
104
+ ├── base_models
105
+ │ ├── appearance_feature_extractor.pth
106
+ │ ├── motion_extractor.pth
107
+ │ ├── spade_generator.pth
108
+ │ └── warping_module.pth
109
+ ├── landmark.onnx
110
+ └── retargeting_models
111
+ └── stitching_retargeting_module.pth
112
+ ```
113
+
114
+ ### 3. Inference 🚀
115
+
116
+ #### Fast hands-on
117
+ ```bash
118
+ # For Linux and Windows
119
+ python inference.py
120
+
121
+ # For macOS with Apple Silicon (Intel is not supported); this may be 20x slower than an RTX 4090
122
+ PYTORCH_ENABLE_MPS_FALLBACK=1 python inference.py
123
+ ```
124
+
125
+ If the script runs successfully, you will get an output mp4 file named `animations/s6--d0_concat.mp4`. This file includes the following results: driving video, input image or video, and generated result.
126
+
127
+ <p align="center">
128
+ <img src="./docs/inference.gif" alt="image">
129
+ </p>
130
+
131
+ Or, you can change the input by specifying the `-s` and `-d` arguments:
132
+
133
+ ```bash
134
+ # source input is an image
135
+ python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d0.mp4
136
+
137
+ # source input is a video ✨
138
+ python inference.py -s assets/examples/source/s13.mp4 -d assets/examples/driving/d0.mp4
139
+
140
+ # more options to see
141
+ python inference.py -h
142
+ ```
143
+
144
+ #### Driving video auto-cropping 📢📢📢
145
+ To use your own driving video, we **recommend**: ⬇️
146
+ - Crop it to a **1:1** aspect ratio (e.g., 512x512 or 256x256 pixels), or enable auto-cropping by `--flag_crop_driving_video`.
147
+ - Focus on the head area, similar to the example videos.
148
+ - Minimize shoulder movement.
149
+ - Make sure the first frame of the driving video is a frontal face with a **neutral expression**.
150
+
151
+ Below is an auto-cropping case using `--flag_crop_driving_video`:
152
+ ```bash
153
+ python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d13.mp4 --flag_crop_driving_video
154
+ ```
155
+
156
+ If you find that the results of auto-cropping are not good, you can modify the `--scale_crop_driving_video` and `--vy_ratio_crop_driving_video` options to adjust the scale and offset, or do it manually.
157
+
158
+ #### Motion template making
159
+ You can also use the auto-generated motion template files ending with `.pkl` to speed up inference, and **protect privacy**, such as:
160
+ ```bash
161
+ python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d5.pkl # portrait animation
162
+ python inference.py -s assets/examples/source/s13.mp4 -d assets/examples/driving/d5.pkl # portrait video editing
163
+ ```
164
+
165
+ ### 4. Gradio interface 🤗
166
+
167
+ We also provide a Gradio <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a> interface for a better experience, just run by:
168
+
169
+ ```bash
170
+ # For Linux and Windows users (and macOS with Intel??)
171
+ python app.py
172
+
173
+ # For macOS with Apple Silicon users (Intel is not supported); this may be 20x slower than an RTX 4090
174
+ PYTORCH_ENABLE_MPS_FALLBACK=1 python app.py
175
+ ```
176
+
177
+ You can specify the `--server_port`, `--share`, `--server_name` arguments to satisfy your needs!
178
+
179
+ 🚀 We also provide an acceleration option `--flag_do_torch_compile`. The first-time inference triggers an optimization process (about one minute), making subsequent inferences 20-30% faster. Performance gains may vary with different CUDA versions.
180
+ ```bash
181
+ # enable torch.compile for faster inference
182
+ python app.py --flag_do_torch_compile
183
+ ```
184
+ **Note**: This method is not supported on Windows and macOS.
185
+
186
+ **Or, try it out effortlessly on [HuggingFace](https://huggingface.co/spaces/KwaiVGI/LivePortrait) 🤗**
187
+
188
+ ### 5. Inference speed evaluation 🚀🚀🚀
189
+ We have also provided a script to evaluate the inference speed of each module:
190
+
191
+ ```bash
192
+ # For NVIDIA GPU
193
+ python speed.py
194
+ ```
195
+
196
+ Below are the results of inferring one frame on an RTX 4090 GPU using the native PyTorch framework with `torch.compile`:
197
+
198
+ | Model | Parameters(M) | Model Size(MB) | Inference(ms) |
199
+ |-----------------------------------|:-------------:|:--------------:|:-------------:|
200
+ | Appearance Feature Extractor | 0.84 | 3.3 | 0.82 |
201
+ | Motion Extractor | 28.12 | 108 | 0.84 |
202
+ | Spade Generator | 55.37 | 212 | 7.59 |
203
+ | Warping Module | 45.53 | 174 | 5.21 |
204
+ | Stitching and Retargeting Modules | 0.23 | 2.3 | 0.31 |
205
+
206
+ *Note: The values for the Stitching and Retargeting Modules represent the combined parameter counts and total inference time of three sequential MLP networks.*
207
+
208
+ ## Community Resources 🤗
209
+
210
+ Discover the invaluable resources contributed by our community to enhance your LivePortrait experience:
211
+
212
+ - [ComfyUI-LivePortraitKJ](https://github.com/kijai/ComfyUI-LivePortraitKJ) by [@kijai](https://github.com/kijai)
213
+ - [comfyui-liveportrait](https://github.com/shadowcz007/comfyui-liveportrait) by [@shadowcz007](https://github.com/shadowcz007)
214
+ - [LivePortrait In ComfyUI](https://www.youtube.com/watch?v=aFcS31OWMjE) by [@Benji](https://www.youtube.com/@TheFutureThinker)
215
+ - [LivePortrait hands-on tutorial](https://www.youtube.com/watch?v=uyjSTAOY7yI) by [@AI Search](https://www.youtube.com/@theAIsearch)
216
+ - [ComfyUI tutorial](https://www.youtube.com/watch?v=8-IcDDmiUMM) by [@Sebastian Kamph](https://www.youtube.com/@sebastiankamph)
217
+ - [Replicate Playground](https://replicate.com/fofr/live-portrait) and [cog-comfyui](https://github.com/fofr/cog-comfyui) by [@fofr](https://github.com/fofr)
218
+
219
+ And many more amazing contributions from our community!
220
+
221
+ ## Acknowledgements 💐
222
+ We would like to thank the contributors of [FOMM](https://github.com/AliaksandrSiarohin/first-order-model), [Open Facevid2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis), [SPADE](https://github.com/NVlabs/SPADE), [InsightFace](https://github.com/deepinsight/insightface) repositories, for their open research and contributions.
223
+
224
+ ## Citation 💖
225
+ If you find LivePortrait useful for your research, welcome to 🌟 this repo and cite our work using the following BibTeX:
226
+ ```bibtex
227
+ @article{guo2024liveportrait,
228
+ title = {LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control},
229
+ author = {Guo, Jianzhu and Zhang, Dingyun and Liu, Xiaoqiang and Zhong, Zhizhou and Zhang, Yuan and Wan, Pengfei and Zhang, Di},
230
+ journal = {arXiv preprint arXiv:2407.03168},
231
+ year = {2024}
232
+ }
233
+ ```
234
+
235
+ *Long live in arXiv.*
236
+
237
+ ## Contact 📧
238
+ [**Jianzhu Guo (郭建珠)**](https://guojianzhu.com); **guojianzhu1994@gmail.com**
docs/inference.gif ADDED

Git LFS Details

  • SHA256: e1316eca5556ba5a8da7c53bcadbc1df26aa822bbde68fbad94813139803d0c6
  • Pointer size: 131 Bytes
  • Size of remote file: 820 kB
docs/showcase2.gif ADDED

Git LFS Details

  • SHA256: eb1fffb139681775780b2956e7d0289f55d199c1a3e14ab263887864d4b0d586
  • Pointer size: 132 Bytes
  • Size of remote file: 2.88 MB
insightface/models/buffalo_l/2d106det.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f001b856447c413801ef5c42091ed0cd516fcd21f2d6b79635b1e733a7109dbf
3
+ size 5030888
insightface/models/buffalo_l/det_10g.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5838f7fe053675b1c7a08b633df49e7af5495cee0493c7dcf6697200b85b5b91
3
+ size 16923827
liveportrait/base_models/appearance_feature_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5279bb8654293dbdf327030b397f107237dd9212fb11dd75b83dfb635211ceb5
3
+ size 3387959
liveportrait/base_models/motion_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:251e6a94ad667a1d0c69526d292677165110ef7f0cf0f6d199f0e414e8aa0ca5
3
+ size 112545506
liveportrait/base_models/spade_generator.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4780afc7909a9f84e24c01d73b31a555ef651521a1fe3b2429bd04534d992aee
3
+ size 221813590
liveportrait/base_models/warping_module.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f61a6f265fe344f14132364859a78bdbbc2068577170693da57fb96d636e282
3
+ size 182180086
liveportrait/landmark.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31d22a5041326c31f19b78886939a634a5aedcaa5ab8b9b951a1167595d147db
3
+ size 114666491
liveportrait/retargeting_models/stitching_retargeting_module.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3652d5a3f95099141a56986aaddec92fadf0a73c87a20fac9a2c07c32b28b611
3
+ size 2393098
liveportrait_animals/base_models/appearance_feature_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2cd1d5d67c0457229e9736d401d39225e096895b869f34234978082561af6de
3
+ size 3387959
liveportrait_animals/base_models/motion_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63c0d450099ef6ebece788ab711cb012509712e23fd1200b79fb65ef980adbb9
3
+ size 112545506
liveportrait_animals/base_models/spade_generator.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7fafa1e31c7c72c9384310d679e32af3fbf214e241fb657df8c3b18ad826f336
3
+ size 221813590
liveportrait_animals/base_models/warping_module.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9719ea184ca9da059f4eee8a8c8c7c6bd46a2b1e40a241ea5490cc42ce6b79b
3
+ size 182180086
liveportrait_animals/base_models_v1.1/appearance_feature_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e320d545579caa83c7b094cef8b7b43fe92a2e410c219ffa97b08be549f45bf
3
+ size 3386487
liveportrait_animals/base_models_v1.1/motion_extractor.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:827d0ea4c56ff252dba50feece3bc62ced365ecae5edb86db07eb71b2f39a696
3
+ size 112542562
liveportrait_animals/base_models_v1.1/spade_generator.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ac31a9b608f3920ec41a402b1c4e29d22007dafd79a855f204aae9307039445
3
+ size 221811030
liveportrait_animals/base_models_v1.1/warping_module.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dfd251dc6b3a1baefebfc658f5bb2a2c565649cdb9aa75032591e824a0bfcee
3
+ size 182178742
liveportrait_animals/retargeting_models/stitching_retargeting_module.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3652d5a3f95099141a56986aaddec92fadf0a73c87a20fac9a2c07c32b28b611
3
+ size 2393098
liveportrait_animals/xpose.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf58e5a3c4a3a017198edc69e33f89c9a37adc856fe6b1776059b2d4a524a7dd
3
+ size 435089171