MichaelT8093 jarvis1997 committed on
Commit
dab7c2d
·
0 Parent(s):

Duplicate from jarvis1997/fr_demo1

Browse files

Co-authored-by: jarvis <jarvis1997@users.noreply.huggingface.co>

Files changed (7) hide show
  1. .gitattributes +27 -0
  2. README.md +13 -0
  3. app.py +120 -0
  4. packages.txt +3 -0
  5. requirements.txt +26 -0
  6. requirements.txt +25 -0
  7. style.css +19 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.onnx filter=lfs diff=lfs merge=lfs -text
13
+ *.ot filter=lfs diff=lfs merge=lfs -text
14
+ *.parquet filter=lfs diff=lfs merge=lfs -text
15
+ *.pb filter=lfs diff=lfs merge=lfs -text
16
+ *.pt filter=lfs diff=lfs merge=lfs -text
17
+ *.pth filter=lfs diff=lfs merge=lfs -text
18
+ *.rar filter=lfs diff=lfs merge=lfs -text
19
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
21
+ *.tflite filter=lfs diff=lfs merge=lfs -text
22
+ *.tgz filter=lfs diff=lfs merge=lfs -text
23
+ *.wasm filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Image Animation Using Thin Plate Spline Motion Model
3
+ emoji: 👁
4
+ colorFrom: indigo
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 3.0.19
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: jarvis1997/fr_demo1
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import argparse
import os
import pathlib
import shutil

# Third party
import gradio as gr
import torch
from PIL import Image

# Fetch the Thin-Plate-Spline-Motion-Model source tree and run from inside it,
# then download the pretrained VOX checkpoint into ./checkpoints.
os.system("git clone https://github.com/yoyo-nb/Thin-Plate-Spline-Motion-Model")
os.chdir("Thin-Plate-Spline-Motion-Model")
os.system("mkdir checkpoints")
os.system("wget -c https://cloud.tsinghua.edu.cn/f/da8d61d012014b12a9e4/?dl=1 -O checkpoints/vox.pth.tar")

# Page title (Chinese: "expression driving"); kept verbatim — it is UI text.
title = "# 表情驱动"
17
+
18
+
19
def get_style_image_path(style_name: str) -> str:
    """Return the relative asset path for a named style resource.

    *style_name* must be ``'source'`` (the example image) or ``'driving'``
    (the example video); any other key raises ``KeyError``.
    """
    asset_files = {
        'source': 'source.png',
        'driving': 'driving.mp4',
    }
    return f"assets/{asset_files[style_name]}"
26
+
27
+
28
def get_style_image_markdown_text(style_name: str) -> str:
    """Render the named style asset as an HTML ``<img>`` markdown snippet."""
    image_url = get_style_image_path(style_name)
    return f'<img id="style-image" src="{image_url}" alt="style image">'
31
+
32
+
33
def update_style_image(style_name: str) -> dict:
    """Build a Gradio update that swaps the style preview markdown."""
    markdown = get_style_image_markdown_text(style_name)
    return gr.Markdown.update(value=markdown)
36
+
37
+
38
def set_example_image(example: list) -> dict:
    """Copy the clicked dataset sample (first cell) into the input image widget."""
    return gr.Image.update(value=example[0])
40
+
41
def set_example_video(example: list) -> dict:
    """Copy the clicked dataset sample (first cell) into the driving-video widget."""
    return gr.Video.update(value=example[0])
43
+
44
def inference(img, vid):
    """Animate the face in *img* with the motion of *vid* and return the result path.

    Parameters
    ----------
    img : PIL.Image.Image
        Source face image (comes from ``gr.Image(type="pil")``).
    vid : str
        Filesystem path of the driving video supplied by Gradio.

    Returns
    -------
    str
        Path of the generated video, ``'./temp/result.mp4'``.

    Notes
    -----
    Runs ``demo.py`` from the cloned Thin-Plate-Spline-Motion-Model repo on
    the CPU, so a call takes minutes.
    """
    import shlex

    # Fix: original shelled out to `mkdir temp` behind an exists() check
    # (racy, needless subprocess); makedirs is atomic enough and idempotent.
    os.makedirs('temp', exist_ok=True)

    img.save("temp/image.jpg", "JPEG")
    # Fix: *vid* is a user-controlled path that was interpolated into the
    # shell command unquoted — it broke on paths with spaces and was a
    # shell-injection vector. shlex.quote() makes it a single safe token.
    os.system(
        "python demo.py --config config/vox-256.yaml"
        " --checkpoint ./checkpoints/vox.pth.tar"
        " --source_image 'temp/image.jpg'"
        f" --driving_video {shlex.quote(str(vid))}"
        " --result_video './temp/result.mp4' --cpu"
    )
    return './temp/result.mp4'
51
+
52
+
53
+
54
def main():
    """Assemble the three-step Gradio UI (image → driving video → generate) and launch it."""
    with gr.Blocks(theme="huggingface", css='style.css') as demo:

        # ---- Step 1: source face image -----------------------------------
        with gr.Box():
            gr.Markdown('''## Step 1 (Provide Input Face Image)
- Drop an image containing a face to the **Input Image**.
- If there are multiple faces in the image, use Edit button in the upper right corner and crop the input image beforehand.
''')
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        input_image = gr.Image(label='Input Image', type="pil")
                    with gr.Row():
                        # Offer every bundled .png as a clickable example.
                        image_paths = sorted(pathlib.Path('assets').glob('*.png'))
                        example_images = gr.Dataset(
                            components=[input_image],
                            samples=[[p.as_posix()] for p in image_paths])

        # ---- Step 2: driving video ---------------------------------------
        with gr.Box():
            gr.Markdown('''## Step 2 (Select Driving Video)
- Select **Style Driving Video for the face image animation**.
''')
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        driving_video = gr.Video(label='Driving Video', format="mp4")
                    with gr.Row():
                        # Offer every bundled .mp4 as a clickable example.
                        video_paths = sorted(pathlib.Path('assets').glob('*.mp4'))
                        example_video = gr.Dataset(
                            components=[driving_video],
                            samples=[[p.as_posix()] for p in video_paths])

        # ---- Step 3: run inference ---------------------------------------
        with gr.Box():
            gr.Markdown('''## Step 3 (Generate Animated Image based on the Video)
- Hit the **Generate** button. (Note: As it runs on the CPU, it takes ~ 3 minutes to generate final results.)
''')
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        generate_button = gr.Button('Generate')
                with gr.Column():
                    result = gr.Video(type="file", label="Output")

            # Wire the button and the two example galleries to their handlers.
            generate_button.click(fn=inference,
                                  inputs=[input_image, driving_video],
                                  outputs=result)
            example_images.click(fn=set_example_image,
                                 inputs=example_images,
                                 outputs=example_images.components)
            example_video.click(fn=set_example_video,
                                inputs=example_video,
                                outputs=example_video.components)

    demo.launch(
        enable_queue=True,
        debug=True
    )


if __name__ == '__main__':
    main()
packages.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ bzip2
2
+ cmake
3
+ ninja-build
requirements.txt ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cffi==1.14.6
2
+ cycler==0.10.0
3
+ decorator==5.1.0
4
+ face-alignment==1.3.5
5
+ imageio==2.9.0
6
+ imageio-ffmpeg==0.4.5
7
+ kiwisolver==1.3.2
8
+ matplotlib==3.4.3
9
+ networkx==2.6.3
10
+ numpy==1.20.3
11
+ pandas==1.3.3
12
+ Pillow
13
+ pycparser==2.20
14
+ pyparsing==2.4.7
15
+ python-dateutil==2.8.2
16
+ pytz==2021.1
17
+ PyWavelets==1.1.1
18
+ PyYAML==5.4.1
19
+ scikit-image==0.18.3
20
+ scikit-learn==1.0
21
+ scipy==1.7.1
22
+ six==1.16.0
23
+ torch==1.11.0
24
+ torchvision==0.12.0
25
+ tqdm==4.62.3
26
+ gradio
requirements.txt ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cffi==1.14.6
2
+ cycler==0.10.0
3
+ decorator==5.1.0
4
+ face-alignment==1.3.5
5
+ imageio==2.9.0
6
+ imageio-ffmpeg==0.4.5
7
+ kiwisolver==1.3.2
8
+ matplotlib==3.4.3
9
+ networkx==2.6.3
10
+ numpy==1.20.3
11
+ pandas==1.3.3
12
+ Pillow==8.3.2
13
+ pycparser==2.20
14
+ pyparsing==2.4.7
15
+ python-dateutil==2.8.2
16
+ pytz==2021.1
17
+ PyWavelets==1.1.1
18
+ PyYAML==5.4.1
19
+ scikit-image==0.18.3
20
+ scikit-learn==1.0
21
+ scipy==1.7.1
22
+ six==1.16.0
23
+ torch==1.10.0
24
+ torchvision==0.11.0
25
+ tqdm==4.62.3
style.css ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ }
4
+ img#overview {
5
+ max-width: 1000px;
6
+ max-height: 600px;
7
+ display: block;
8
+ margin: auto;
9
+ }
10
+ img#style-image {
11
+ max-width: 1000px;
12
+ max-height: 600px;
13
+ display: block;
14
+ margin: auto;
15
+ }
16
+ img#visitor-badge {
17
+ display: block;
18
+ margin: auto;
19
+ }