import gradio as gr
import torch
import cv2
import numpy as np
from face_alignment import FaceAlignment, LandmarksType
from torchvision import transforms

# Load the pretrained model (assumes the file contains a full pickled nn.Module)
try:
    model = torch.load("./abudhabi.pt", map_location=torch.device('cpu'))
    model.eval()
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    raise

# Initialize the face alignment model (2D, 68-point landmarks)
# Note: newer face_alignment releases renamed LandmarksType._2D to LandmarksType.TWO_D
fa = FaceAlignment(LandmarksType._2D, flip_input=False)

# Detect a face and rotate the frame so the eyes lie on a horizontal line
def detect_and_align_faces(frame):
    preds = fa.get_landmarks(frame)
    aligned_face = None
    if preds is not None:
        landmarks = preds[0]

        # 68-point landmark scheme: indices 36-41 and 42-47 are the two eyes
        left_eye = landmarks[36:42]
        right_eye = landmarks[42:48]

        # Angle of the line connecting the two eye centers
        left_eye_center = left_eye.mean(axis=0)
        right_eye_center = right_eye.mean(axis=0)
        dY = right_eye_center[1] - left_eye_center[1]
        dX = right_eye_center[0] - left_eye_center[0]
        angle = np.degrees(np.arctan2(dY, dX))

        # Midpoint between the eyes, used as the rotation center
        # (getRotationMatrix2D requires a float tuple in recent OpenCV versions)
        eyes_center = (float((left_eye_center[0] + right_eye_center[0]) / 2),
                       float((left_eye_center[1] + right_eye_center[1]) / 2))

        # Rotate the frame around the eye midpoint to level the eyes
        M = cv2.getRotationMatrix2D(eyes_center, angle, 1)
        aligned_face = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))

    return aligned_face
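
# A hedged follow-up: detect_and_align_faces returns the whole rotated frame,
# while most expression models expect a fixed-size face crop. This helper is
# not in the original code; the crop size and the downward shift of the
# window are illustrative assumptions, not values from the model.
def crop_face(aligned_frame, eyes_center, size=224):
    x, y = int(eyes_center[0]), int(eyes_center[1])
    half = size // 2
    # Shift the window down slightly so the crop covers the face, not just the eyes
    top = max(0, y - half // 2)
    left = max(0, x - half)
    return aligned_frame[top:top + size, left:left + size]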

# Manipulate the facial expression (adapt this to your model's interface)
def manipulate_expression(face, expression, intensity):
    # Feed the expression label and intensity to the model, for example:
    # expression_mapping = {"happy": 0, "sad": 1, "angry": 2, "fearful": 3, "disgusted": 4, "surprised": 5}
    # expression_index = expression_mapping[expression]
    # manipulated_face = model(face, expression_index, intensity)
    # Replace this pass-through with your actual expression manipulation code
    manipulated_face = face  # Replace with your model's output
    return manipulated_face
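
# A minimal sketch of what the model call inside manipulate_expression might
# look like, assuming (hypothetically) that abudhabi.pt takes a normalized
# 224x224 RGB tensor plus an expression index and an intensity scalar, and
# returns an image tensor in [0, 1]. It uses the torchvision transforms
# import at the top of the file; adjust the size, preprocessing, and model
# signature to match your actual network.
def manipulate_expression_sketch(face, expression, intensity):
    expression_mapping = {"happy": 0, "sad": 1, "angry": 2,
                          "fearful": 3, "disgusted": 4, "surprised": 5}
    preprocess = transforms.Compose([
        transforms.ToTensor(),          # HWC uint8 -> CHW float in [0, 1]
        transforms.Resize((224, 224)),  # hypothetical model input size
    ])
    face_tensor = preprocess(face).unsqueeze(0)  # add batch dimension
    with torch.no_grad():
        # Hypothetical signature: model(image, expression_index, intensity)
        output = model(face_tensor,
                       torch.tensor([expression_mapping[expression]]),
                       torch.tensor([intensity]))
    # Convert back to an HWC uint8 numpy image for OpenCV/Gradio
    output = output.squeeze(0).clamp(0, 1).mul(255).byte()
    return output.permute(1, 2, 0).cpu().numpy()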


def process_frame(frame, expression, intensity):
    # Gradio delivers numpy images in RGB order, which is also what
    # face_alignment expects, so no BGR<->RGB conversion is needed here
    aligned_face = detect_and_align_faces(frame)
    if aligned_face is not None:
        manipulated_face = manipulate_expression(aligned_face, expression, intensity)
        # TODO: composite the manipulated face back into the original frame
        frame = manipulated_face  # replace with the composited result
    return frame
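
# A sketch of the compositing step left as a TODO above, using OpenCV's
# seamlessClone (Poisson blending). It assumes `face` is a crop to be pasted
# into `frame` centered at `center`, an (x, y) pixel tuple; the function name
# and the full-crop mask are illustrative, not part of the original code.
def blend_face_into_frame(frame, face, center):
    mask = np.full(face.shape[:2], 255, dtype=np.uint8)  # blend the whole crop
    return cv2.seamlessClone(face, frame, mask, center, cv2.NORMAL_CLONE)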

# Gradio interface (Gradio 3.x API; Gradio 4.x replaced source= with sources=["webcam"])
iface = gr.Interface(
    fn=process_frame,
    inputs=[
        gr.Image(source="webcam", streaming=True, type="numpy"),
        gr.Dropdown(["happy", "sad", "angry", "fearful", "disgusted", "surprised"], label="Expression"),
        gr.Slider(0, 1, label="Intensity"),
    ],
    outputs=gr.Image(type="numpy"),
    live=True,  # required for the webcam stream to update continuously
    title="Deepfake Emotion Control MVP (Real-time)",
)

iface.launch()