Oviya committed on
Commit
ad8f099
·
1 Parent(s): 35698c9

add app.py

Browse files
Files changed (2) hide show
  1. app.py +70 -32
  2. requirements.txt +5 -0
app.py CHANGED
@@ -1,53 +1,62 @@
1
  import os
 
 
2
  import uuid
3
- import gradio as gr
4
- from wyn_wav2lip.wav2lip import Wav2Lip
5
 
6
- MEDIA_DIR = "media"
7
- os.makedirs(MEDIA_DIR, exist_ok=True)
 
 
 
8
 
9
- wav2lip = Wav2Lip()
10
- wav2lip.setup()
 
11
 
12
- def lipsync_func(image, audio):
13
- # image: PIL.Image
14
- # audio: tuple (sample_rate, data) or filepath depending on Gradio component
15
 
16
- # Save inputs
17
- image_path = os.path.join(MEDIA_DIR, f"{uuid.uuid4()}.png")
18
- audio_path = os.path.join(MEDIA_DIR, f"{uuid.uuid4()}.wav")
19
 
20
- image.save(image_path)
 
21
 
22
- import soundfile as sf
23
- sr, data = audio # if using gr.Audio(type="numpy")
24
- sf.write(audio_path, data, sr)
 
 
 
25
 
26
- # Run Wav2Lip like we did before
27
- # (Use the same logic as in the Colab example)
28
- video_path = run_wav2lip(image_path, audio_path)
 
 
 
 
 
29
 
30
- return video_path
31
 
32
- def run_wav2lip(image_path, audio_path):
33
- # similar to the Colab sample before
34
  existing_mp4 = {
35
  f for f in os.listdir(MEDIA_DIR)
36
  if f.lower().endswith(".mp4")
37
  }
38
 
39
- import os
40
- from os import getcwd, chdir
41
-
42
- old = getcwd()
43
- chdir(MEDIA_DIR)
44
  try:
 
45
  wav2lip.run(
46
  video_file=os.path.basename(image_path),
47
  vocal_file=os.path.basename(audio_path),
48
  )
49
  finally:
50
- chdir(old)
51
 
52
  new_mp4 = [
53
  f for f in os.listdir(MEDIA_DIR)
@@ -60,16 +69,45 @@ def run_wav2lip(image_path, audio_path):
60
  if f.lower().endswith(".mp4")
61
  ]
62
  if not mp4_candidates:
63
- raise RuntimeError("No video created")
64
  return max(mp4_candidates, key=os.path.getmtime)
 
65
  return os.path.join(MEDIA_DIR, new_mp4[0])
66
 
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  demo = gr.Interface(
69
  fn=lipsync_func,
70
- inputs=[gr.Image(type="pil"), gr.Audio(type="numpy")],
71
- outputs=gr.Video(),
72
- title="Wav2Lip Lipsync Service"
 
 
 
 
73
  )
74
 
75
  if __name__ == "__main__":
 
1
  import os
2
+ import sys
3
+ import types
4
  import uuid
 
 
5
 
6
+ # --------------------------------------------------------------------
7
+ # Fix environment issues
8
+ # --------------------------------------------------------------------
9
+ # Fix OMP_NUM_THREADS error
10
+ os.environ["OMP_NUM_THREADS"] = "1"
11
 
12
+ # Dummy google.colab for wyn_wav2lip
13
+ google = types.ModuleType("google")
14
+ colab = types.ModuleType("google.colab")
15
 
16
+ class _DummyDrive:
17
+ def mount(self, *args, **kwargs):
18
+ print("google.colab.drive.mount() called (dummy).")
19
 
20
+ colab.drive = _DummyDrive()
21
+ google.colab = colab
 
22
 
23
+ sys.modules["google"] = google
24
+ sys.modules["google.colab"] = colab
25
 
26
+ # --------------------------------------------------------------------
27
+ # Imports
28
+ # --------------------------------------------------------------------
29
+ import gradio as gr
30
+ from wyn_wav2lip.wav2lip import Wav2Lip
31
+ import soundfile as sf
32
 
33
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
34
+ MEDIA_DIR = os.path.join(BASE_DIR, "media")
35
+ os.makedirs(MEDIA_DIR, exist_ok=True)
36
+
37
+ print("Initialising Wav2Lip...")
38
+ wav2lip = Wav2Lip()
39
+ wav2lip.setup()
40
+ print("Wav2Lip ready.")
41
 
 
42
 
43
def run_wav2lip(image_path: str, audio_path: str) -> str:
    """Run Wav2Lip on the given image/audio and return the absolute path
    of the newly created MP4 inside MEDIA_DIR.

    Args:
        image_path: absolute path of the saved input image (inside MEDIA_DIR).
        audio_path: absolute path of the saved input WAV (inside MEDIA_DIR).

    Returns:
        Absolute path to the generated video file.

    Raises:
        RuntimeError: if Wav2Lip produced no new MP4 file.
    """
    # Snapshot the MP4s that already exist so we can tell which one is new.
    existing_mp4 = {
        f for f in os.listdir(MEDIA_DIR)
        if f.lower().endswith(".mp4")
    }

    old_cwd = os.getcwd()
    os.chdir(MEDIA_DIR)
    try:
        # Wav2Lip expects filenames relative to the current working dir.
        wav2lip.run(
            video_file=os.path.basename(image_path),
            vocal_file=os.path.basename(audio_path),
        )
    finally:
        # Always restore the cwd, even if Wav2Lip raises.
        os.chdir(old_cwd)

    # MP4 files that appeared during this run.
    mp4_candidates = [
        os.path.join(MEDIA_DIR, f)
        for f in os.listdir(MEDIA_DIR)
        if f.lower().endswith(".mp4") and f not in existing_mp4
    ]
    if not mp4_candidates:
        raise RuntimeError("No MP4 created by Wav2Lip.")
    # If Wav2Lip wrote more than one file, the most recent one wins.
    # (The original had an unreachable second `return new_mp4[0]` after
    # this return; that dead code is removed.)
    return max(mp4_candidates, key=os.path.getmtime)
76
 
77
 
78
def lipsync_func(image, audio):
    """Gradio handler: persist both inputs to MEDIA_DIR, run Wav2Lip,
    and return the generated video path (or None when input is missing).

    image: PIL image (from gr.Image)
    audio: (sr, data) tuple (from gr.Audio(type="numpy"))
    """
    # Nothing to do until both inputs are provided.
    if image is None or audio is None:
        return None

    # Save the image under a collision-free name.
    image_path = os.path.join(MEDIA_DIR, f"{uuid.uuid4().hex}.png")
    image.save(image_path)

    # Save the audio: gr.Audio(type="numpy") yields (sample_rate, samples).
    sample_rate, samples = audio
    audio_path = os.path.join(MEDIA_DIR, f"{uuid.uuid4().hex}.wav")
    sf.write(audio_path, samples, sample_rate)

    # Hand off to Wav2Lip and return the resulting video path.
    return run_wav2lip(image_path, audio_path)
100
+
101
+
102
# Gradio UI: a static image plus a WAV clip in, a lip-synced video out.
_image_input = gr.Image(type="pil", label="Teacher image")
_audio_input = gr.Audio(type="numpy", label="Teacher audio (.wav)")
_video_output = gr.Video(label="Lip-synced video")

demo = gr.Interface(
    fn=lipsync_func,
    inputs=[_image_input, _audio_input],
    outputs=_video_output,
    title="Wav2Lip Lipsync Service",
    description="Upload a static teacher image and a WAV audio. The model will generate a talking video.",
)
112
 
113
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1,2 +1,7 @@
1
  wyn-wav2lip
2
  gradio
 
 
 
 
 
 
1
  wyn-wav2lip
2
  gradio
3
+ soundfile
4
+ gdown
5
+ dlib-bin==19.24.2.post1
6
+ opencv-python
7
+