Files changed (1) hide show
  1. app.py +63 -213
app.py CHANGED
@@ -1,223 +1,73 @@
1
- import os, sys
2
- import tempfile
3
  import gradio as gr
4
- from src.gradio_demo import SadTalker
5
- # from src.utils.text2speech import TTSTalker
6
- from huggingface_hub import snapshot_download
7
 
8
def get_source_image(image):
    """Identity pass-through used as a Gradio callback for the source image."""
    selected = image
    return selected
 
10
 
11
# Detect whether we are running as a stable-diffusion-webui extension.
# `import webui` only succeeds inside that host process.
try:
    import webui  # in webui
    in_webui = True
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not swallowed during startup; any import failure means standalone mode.
    in_webui = False
16
 
 
 
 
17
 
18
def toggle_audio_file(choice):
    """Swap visibility of the audio-upload vs. IDLE-mode widgets.

    Args:
        choice: state of the "Use Idle Animation" checkbox.

    Returns:
        A pair of ``gr.update`` objects for (driven_audio, driven_audio_no):
        unchecked -> show the real audio input, checked -> show the IDLE stub.
    """
    # Fixed PEP 8 E712: compare truthiness, not `choice == False`.
    if choice:
        return gr.update(visible=False), gr.update(visible=True)
    return gr.update(visible=True), gr.update(visible=False)
23
-
24
def ref_video_fn(path_of_ref_video):
    """Tick the 'Use Reference Video' checkbox whenever a reference clip is set."""
    has_reference = path_of_ref_video is not None
    return gr.update(value=has_reference)
29
-
30
def download_model():
    """Download the SadTalker V002rc checkpoints into ./checkpoints via the HF Hub."""
    REPO_ID = 'vinthony/SadTalker-V002rc'
    # NOTE(review): `local_dir_use_symlinks` is deprecated in newer
    # huggingface_hub releases — confirm the pinned version still accepts it.
    snapshot_download(repo_id=REPO_ID, local_dir='./checkpoints', local_dir_use_symlinks=True)
33
-
34
def sadtalker_demo():
    """Build and return the full-featured SadTalker Gradio interface.

    Downloads the checkpoints, creates a lazily-loaded SadTalker pipeline,
    and lays out three areas: source image + driving inputs, generation
    settings, and the output video plus cached examples.

    Returns:
        The assembled ``gr.Blocks`` interface (not yet launched).
    """

    download_model()

    # lazy_load defers heavy model initialisation until the first request.
    sad_talker = SadTalker(lazy_load=True)
    # tts_talker = TTSTalker()

    with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
        # Header: paper title and Arxiv / Homepage / Github links.
        gr.Markdown("<div align='center'> <h2> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </span> </h2> \
                    <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
                    <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
                    <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </div>")

        gr.Markdown("""
        <b>You may duplicate the space and upgrade to GPU in settings for better performance and faster inference without waiting in the queue. <a style='display:inline-block' href="https://huggingface.co/spaces/vinthony/SadTalker?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></b> \
        <br/><b>Alternatively, try our GitHub <a href=https://github.com/Winfredy/SadTalker> code </a> on your own GPU. </b> <a style='display:inline-block' href="https://github.com/Winfredy/SadTalker"><img src="https://img.shields.io/github/stars/Winfredy/SadTalker?style=social"/></a> \
        """)

        with gr.Row(): #.style(equal_height=False):
            # Left column: source image and driving-signal inputs.
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_source_image"):
                    with gr.TabItem('Source image'):
                        with gr.Row():
                            source_image = gr.Image(label="Source image", source="upload", type="filepath", elem_id="img2img_image") # .style(width=512)

                with gr.Tabs(elem_id="sadtalker_driven_audio"):
                    with gr.TabItem('Driving Methods'):
                        gr.Markdown("Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE Mode + Ref Video(pose, blink, pose+blink) 3. IDLE Mode only 4. Ref Video only (all) ")

                        with gr.Row():
                            # Real audio input vs. a hidden stub shown only in IDLE mode.
                            driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath", max_length=180) # 180s
                            driven_audio_no = gr.Audio(label="Use IDLE mode, no audio is required", source="upload", type="filepath", visible=False)

                            with gr.Column():
                                use_idle_mode = gr.Checkbox(label="Use Idle Animation")
                                length_of_audio = gr.Number(value=5, label="The length(seconds) of the generated video.")
                                # Toggling IDLE mode swaps which audio widget is visible.
                                use_idle_mode.change(toggle_audio_file, inputs=use_idle_mode, outputs=[driven_audio, driven_audio_no]) # todo

                        with gr.Row():
                            ref_video = gr.Video(label="Reference Video", source="upload", type="filepath", elem_id="vidref") # .style(width=512)

                            with gr.Column():
                                use_ref_video = gr.Checkbox(label="Use Reference Video")
                                ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="How to borrow from reference Video?((fully transfer, aka, video driving mode))")

                            # Uploading a reference video auto-checks the box above.
                            ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo

            # Right column: generation settings and the rendered result.
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_checkbox"):
                    with gr.TabItem('Settings'):
                        gr.Markdown("need help? please visit our [[best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md)] for more detials")
                        with gr.Column(variant='panel'):
                            # width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
                            # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
                            with gr.Row():
                                pose_style = gr.Slider(minimum=0, maximum=45, step=1, label="Pose style", value=0) #
                                exp_weight = gr.Slider(minimum=0, maximum=3, step=0.1, label="expression scale", value=1) #
                                blink_every = gr.Checkbox(label="use eye blink", value=True)

                            with gr.Row():
                                size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="use 256/512 model?") #
                                preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle input image?")

                            with gr.Row():
                                is_still_mode = gr.Checkbox(label="Still Mode (fewer head motion, works with preprocess `full`)")
                                facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="which face render?")

                            with gr.Row():
                                batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=1)
                                enhancer = gr.Checkbox(label="GFPGAN as Face enhancer")

                        submit = gr.Button('Generate', elem_id="sadtalker_generate", variant='primary')

                with gr.Tabs(elem_id="sadtalker_genearted"):
                    gen_video = gr.Video(label="Generated video", format="mp4", scale=1) # .style(width=256)

        # NOTE(review): this input order must match SadTalker.test's positional
        # signature in src/gradio_demo.py — verify when changing either side.
        submit.click(
            fn=sad_talker.test,
            inputs=[source_image,
                    driven_audio,
                    preprocess_type,
                    is_still_mode,
                    enhancer,
                    batch_size,
                    size_of_image,
                    pose_style,
                    facerender,
                    exp_weight,
                    use_ref_video,
                    ref_video,
                    ref_info,
                    use_idle_mode,
                    length_of_audio,
                    blink_every
                    ],
            outputs=[gen_video],
        )

        with gr.Row():
            # Each example row: (image, audio, preprocess, still_mode, enhancer).
            examples = [
                [
                    'examples/source_image/full_body_1.png',
                    'examples/driven_audio/bus_chinese.wav',
                    'crop',
                    True,
                    False
                ],
                [
                    'examples/source_image/full_body_2.png',
                    'examples/driven_audio/japanese.wav',
                    'crop',
                    False,
                    False
                ],
                [
                    'examples/source_image/full3.png',
                    'examples/driven_audio/deyu.wav',
                    'crop',
                    False,
                    True
                ],
                [
                    'examples/source_image/full4.jpeg',
                    'examples/driven_audio/eluosi.wav',
                    'full',
                    False,
                    True
                ],
                [
                    'examples/source_image/full4.jpeg',
                    'examples/driven_audio/imagine.wav',
                    'full',
                    True,
                    True
                ],
                [
                    'examples/source_image/full_body_1.png',
                    'examples/driven_audio/bus_chinese.wav',
                    'full',
                    True,
                    False
                ],
                [
                    'examples/source_image/art_13.png',
                    'examples/driven_audio/fayu.wav',
                    'resize',
                    True,
                    False
                ],
                [
                    'examples/source_image/art_5.png',
                    'examples/driven_audio/chinese_news.wav',
                    'resize',
                    False,
                    False
                ],
                [
                    'examples/source_image/art_5.png',
                    'examples/driven_audio/RD_Radio31_000.wav',
                    'resize',
                    True,
                    True
                ],
            ]
            # Pre-render examples only on HF Spaces (SYSTEM env var set there).
            gr.Examples(examples=examples,
                        inputs=[
                            source_image,
                            driven_audio,
                            preprocess_type,
                            is_still_mode,
                            enhancer],
                        outputs=[gen_video],
                        fn=sad_talker.test,
                        cache_examples=os.getenv('SYSTEM') == 'spaces') #

    return sadtalker_interface
215
-
216
-
217
if __name__ == "__main__":
    # Build the interface, enable request queueing, then serve it.
    interface = sadtalker_demo()
    interface.queue(max_size=10, api_open=True)
    interface.launch(debug=True)
222
-
223
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
 
2
  import gradio as gr
 
 
 
3
 
4
# IMPORTANT: SadTalker expects checkpoints in ./checkpoints
# Resolve the directory containing this file so the path works from any CWD.
_this_file = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(_this_file)
os.environ["SADTALKER_CHECKPOINTS"] = os.path.join(BASE_DIR, "checkpoints")
7
 
8
+ # SadTalker internal gradio demo class (used in vinthony space)
9
+ from src.gradio_demo import SadTalker
 
 
 
10
 
11
def launch_app():
    """Build and return the simplified, CPU-oriented SadTalker Gradio demo.

    Instantiates SadTalker with the checkpoint directory taken from the
    SADTALKER_CHECKPOINTS environment variable, then wires a minimal
    image + audio -> video interface around ``SadTalker.test``.

    Returns:
        The assembled ``gr.Blocks`` app (not yet launched).
    """
    # Create SadTalker object
    sadtalker = SadTalker(checkpoint_path=os.environ["SADTALKER_CHECKPOINTS"])

    with gr.Blocks(title="Talking Photo (SadTalker CPU)") as demo:
        gr.Markdown("""
        # 🗣️ Talking Photo Generator (SadTalker)
        Upload face image + audio video with lip movement.

        ⚠️ CPU is slow, test with 5–10 sec audio first.
        """)

        with gr.Row():
            source_image = gr.Image(type="filepath", label="Face Image (jpg/png)")
            driven_audio = gr.Audio(type="filepath", label="Voice Audio (wav/mp3)")

        with gr.Row():
            # Reduced option sets versus the full demo, tuned for CPU inference.
            preprocess = gr.Dropdown(
                choices=["crop", "full"],
                value="crop",
                label="Preprocess (CPU recommended: crop)"
            )
            size = gr.Dropdown(
                choices=["256", "512"],
                value="256",
                label="Output Size (CPU recommended: 256)"
            )

        generate_btn = gr.Button("🎬 Generate Video", variant="primary")

        result_video = gr.Video(label="Output Video")
        logs = gr.Textbox(label="Logs", lines=12)

        def run(image_path, audio_path, preprocess_mode, out_size):
            """Validate uploads and run SadTalker inference.

            Args:
                image_path: filepath of the uploaded face image (or None).
                audio_path: filepath of the uploaded audio (or None).
                preprocess_mode: 'crop' or 'full'.
                out_size: '256' or '512' (dropdown value is a string).
            """
            if not image_path:
                raise gr.Error("Upload image first.")
            if not audio_path:
                raise gr.Error("Upload audio first.")

            # SadTalker infer (matches internal demo pipeline)
            # returns: (video_path, log)
            # NOTE(review): the click handler's outputs expect a 2-tuple
            # (video, log) — confirm SadTalker.test in this fork actually
            # returns two values, and accepts these keyword arguments.
            return sadtalker.test(
                source_image=image_path,
                driven_audio=audio_path,
                preprocess=preprocess_mode,
                size=int(out_size),  # dropdown yields a string; model wants int
                still=True,
                enhancer=None,  # CPU friendly
                background_enhancer=None
            )

        generate_btn.click(
            fn=run,
            inputs=[source_image, driven_audio, preprocess, size],
            outputs=[result_video, logs],
            queue=True
        )

    return demo
70
+
71
+
72
# Build the app at import time (Hugging Face Spaces executes app.py directly).
demo = launch_app()
queued_app = demo.queue(max_size=3)
queued_app.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)