Commit
·
bd5d9e0
1
Parent(s):
e09b7ad
feat: local inference fallback using DeepFakeAI face_swapper
Browse files
app.py
CHANGED
|
@@ -2,6 +2,12 @@ import gradio as gr
|
|
| 2 |
import os, uuid, time
|
| 3 |
from gradio_client import Client, handle_file
|
| 4 |
from moviepy.editor import VideoFileClip
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
# ── config ──────────────────────────────────────────────────────────
|
| 7 |
hf_token = os.environ.get("TOKEN")
|
|
@@ -36,6 +42,46 @@ def preprocess_video(path: str, target_fps: int = 12,
|
|
| 36 |
clip.close()
|
| 37 |
return out_path
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
# ── main generate ───────────────────────────────────────────────────
|
| 40 |
def generate(input_image, input_video):
|
| 41 |
|
|
@@ -52,24 +98,26 @@ def generate(input_image, input_video):
|
|
| 52 |
|
| 53 |
try:
|
| 54 |
pre_video = preprocess_video(input_video)
|
| 55 |
-
job = client.submit(
|
| 56 |
-
input_image=handle_file(input_image),
|
| 57 |
-
input_video={"video": handle_file(pre_video)},
|
| 58 |
-
device='cpu',
|
| 59 |
-
selector='many',
|
| 60 |
-
gender=None,
|
| 61 |
-
race=None,
|
| 62 |
-
order=None,
|
| 63 |
-
api_name="/predict"
|
| 64 |
-
)
|
| 65 |
-
while not job.done():
|
| 66 |
-
time.sleep(5)
|
| 67 |
-
|
| 68 |
-
if not job.status().success:
|
| 69 |
-
return None
|
| 70 |
|
| 71 |
-
|
| 72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
except Exception as e:
|
| 75 |
gr.Error(f"Generation failed: {e}")
|
|
|
|
| 2 |
import os, uuid, time
|
| 3 |
from gradio_client import Client, handle_file
|
| 4 |
from moviepy.editor import VideoFileClip
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
# Local pipeline imports
|
| 8 |
+
import DeepFakeAI.globals as DF_G
|
| 9 |
+
from DeepFakeAI import utilities as DF_U
|
| 10 |
+
from DeepFakeAI.processors.frame.modules import face_swapper as DF_FS
|
| 11 |
|
| 12 |
# ── config ──────────────────────────────────────────────────────────
|
| 13 |
hf_token = os.environ.get("TOKEN")
|
|
|
|
| 42 |
clip.close()
|
| 43 |
return out_path
|
| 44 |
|
| 45 |
+
def _run_local_faceswap(source_image_path: str, target_video_path: str) -> Optional[str]:
    """Run the DeepFakeAI face-swap pipeline locally as a fallback.

    Swaps the face from *source_image_path* onto every frame of
    *target_video_path*, re-encodes the result, and restores the original
    audio track.

    Args:
        source_image_path: Path to the source face image.
        target_video_path: Path to the target video to process.

    Returns:
        Path to the rendered output video, or ``None`` if frame
        extraction, frame enumeration, or video re-encoding fails.
    """
    # The DeepFakeAI pipeline is driven by module-level globals; set the
    # defaults it needs before invoking any processing step.
    DF_G.source_path = source_image_path
    DF_G.target_path = target_video_path
    DF_G.output_video_encoder = 'libx264'
    DF_G.output_video_quality = 20
    DF_G.temp_frame_format = 'png'
    DF_G.temp_frame_quality = 95
    DF_G.keep_temp = False
    DF_G.skip_audio = False
    DF_G.execution_thread_count = 2
    DF_G.execution_queue_count = 2
    # Prefer CUDA on T4 if available; fallback to CPU
    DF_G.execution_providers = DF_U.decode_execution_providers(['cuda', 'cpu'])

    # Ensure the face-swapper model weights exist (downloads if missing).
    DF_FS.pre_check()

    # Extract frames at the detected FPS; 12.0 if detection fails.
    fps = DF_U.detect_fps(target_video_path) or 12.0
    DF_U.create_temp(target_video_path)
    try:
        # BUG FIX: the temp frame directory was previously cleared only on
        # the success path, leaking it on every early return or exception.
        # The try/finally guarantees cleanup on all paths.
        if not DF_U.extract_frames(target_video_path, fps):
            return None
        temp_frames = DF_U.get_temp_frame_paths(target_video_path)
        if not temp_frames:
            return None

        # Swap faces on every extracted frame.
        DF_FS.process_video(source_image_path, temp_frames)

        # Rebuild video and restore audio
        if not DF_U.create_video(target_video_path, fps):
            return None
        out_path = DF_U.normalize_output_path(
            source_image_path,
            target_video_path,
            os.path.join(output_dir, f"out_{uuid.uuid4().hex}.mp4"),
        )
        DF_U.restore_audio(target_video_path, out_path)
        return out_path
    finally:
        DF_U.clear_temp(target_video_path)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
# ── main generate ───────────────────────────────────────────────────
|
| 86 |
def generate(input_image, input_video):
|
| 87 |
|
|
|
|
| 98 |
|
| 99 |
try:
|
| 100 |
pre_video = preprocess_video(input_video)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
+
if client is not None:
|
| 103 |
+
job = client.submit(
|
| 104 |
+
input_image=handle_file(input_image),
|
| 105 |
+
input_video={"video": handle_file(pre_video)},
|
| 106 |
+
device='cpu',
|
| 107 |
+
selector='many',
|
| 108 |
+
gender=None,
|
| 109 |
+
race=None,
|
| 110 |
+
order=None,
|
| 111 |
+
api_name="/predict"
|
| 112 |
+
)
|
| 113 |
+
while not job.done():
|
| 114 |
+
time.sleep(5)
|
| 115 |
+
if not job.status().success:
|
| 116 |
+
return None
|
| 117 |
+
return job.outputs()[0]["video"]
|
| 118 |
+
else:
|
| 119 |
+
# Local fallback
|
| 120 |
+
return _run_local_faceswap(input_image, pre_video)
|
| 121 |
|
| 122 |
except Exception as e:
|
| 123 |
gr.Error(f"Generation failed: {e}")
|