File size: 2,263 Bytes
9fca7c3
0ed24f7
19d53bf
 
9fca7c3
0ed24f7
279af2f
19d53bf
 
 
b8c2f1b
19d53bf
9fca7c3
19d53bf
 
a73b3bc
19d53bf
 
 
 
 
 
 
 
9fca7c3
19d53bf
 
 
9fca7c3
19d53bf
 
 
 
9fca7c3
19d53bf
 
427cb66
19d53bf
 
 
 
 
 
0ed24f7
19d53bf
 
0ed24f7
19d53bf
 
 
427cb66
19d53bf
279af2f
19d53bf
9fca7c3
19d53bf
 
 
 
 
 
 
 
 
a73b3bc
19d53bf
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import gradio as gr
import numpy as np
import cv2
import onnxruntime
from insightface.app import FaceAnalysis
from pathlib import Path

# Set up the InsightFace detector/recognizer used to locate faces and
# produce the embeddings consumed by the swapper model.
face_analyzer = FaceAnalysis(name="buffalo_l")
face_analyzer.prepare(ctx_id=0, det_size=(640, 640))

# Resolve the face-swapper ONNX weights on disk and fail fast if absent,
# rather than erroring later on the first request.
MODEL_PATH = Path("models") / "inswapper_128.onnx"
if not MODEL_PATH.exists():
    raise FileNotFoundError("Model file inswapper_128.onnx not found.")

# One shared inference session for the whole app.
session = onnxruntime.InferenceSession(str(MODEL_PATH))

def swap_faces(source_img, target_img):
    """Swap the single face found in *source_img* onto *target_img*.

    Args:
        source_img: PIL image containing exactly one face to copy.
        target_img: PIL image containing exactly one face to be replaced.

    Returns:
        The swapped image as an RGB ``numpy`` array, suitable for ``gr.Image``.

    Raises:
        gr.Error: if an input is missing, face detection fails, more than one
            face is found, or the model run fails. (Fix: the original returned
            plain error *strings*, but the output component is ``gr.Image`` —
            Gradio cannot render a str there, so every error path broke the UI.
            ``gr.Error`` surfaces the message to the user correctly.)
    """
    # Guard against empty inputs before touching cv2/numpy.
    if source_img is None or target_img is None:
        raise gr.Error("Please provide both a source and a target image.")

    try:
        # PIL delivers RGB; OpenCV and insightface expect BGR.
        source_bgr = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
        target_bgr = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)

        # Detect faces in both images.
        source_faces = face_analyzer.get(source_bgr)
        target_faces = face_analyzer.get(target_bgr)

        if not source_faces or not target_faces:
            raise gr.Error("No faces detected in one or both images.")
        if len(source_faces) > 1 or len(target_faces) > 1:
            raise gr.Error("Multiple faces detected; only one face per image is supported.")

        source_face = source_faces[0]
        target_face = target_faces[0]

        # NOTE(review): these input names are assumed to match the
        # inswapper_128 graph — verify with session.get_inputs(); this model
        # is usually driven through insightface's INSwapper wrapper instead
        # of a raw InferenceSession. TODO confirm.
        input_data = {
            "target_image": target_bgr,
            "target_face": target_face.embedding,
            "source_face": source_face.embedding,
        }

        # Run the ONNX model; first output is the swapped frame.
        result = session.run(None, input_data)[0]

        # Assumes the model emits floats in [0, 1] — TODO confirm; rescale
        # to uint8 and convert back to RGB for display.
        result_img = np.clip(result * 255, 0, 255).astype(np.uint8)
        return cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
    except gr.Error:
        raise  # user-facing messages pass through unchanged
    except Exception as e:
        # Preserve the original cause for server-side debugging.
        raise gr.Error(f"Face swap failed: {e}") from e

# ---- Gradio interface ----
# Two image inputs side by side, a trigger button, and one image output
# wired to swap_faces.
with gr.Blocks() as demo:
    gr.Markdown("# Face Swap Tool 🚀")
    with gr.Row():
        src_image = gr.Image(label="Source Face", type="pil")
        dst_image = gr.Image(label="Target Image", type="pil")
    swap_button = gr.Button("Swap Faces")
    result_view = gr.Image(label="Swapped Face")
    swap_button.click(
        swap_faces,
        inputs=[src_image, dst_image],
        outputs=result_view,
    )

# Start the web server.
demo.launch()