import gradio as gr
import torch
import cv2
import numpy as np
from face_alignment import FaceAlignment, LandmarksType
from torchvision import transforms  # unused below, but may be needed to unpickle the model
# Load the pretrained model (a full pickled nn.Module, not a state_dict;
# on torch >= 2.6 this also needs weights_only=False)
try:
    model = torch.load("./abudhabi.pt", map_location=torch.device('cpu'))
    model.eval()
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    raise
# Initialize the face-alignment model on CPU (the default device is 'cuda',
# which crashes on CPU-only hardware); note that face-alignment >= 1.4
# renamed LandmarksType._2D to LandmarksType.TWO_D
fa = FaceAlignment(LandmarksType._2D, flip_input=False, device='cpu')
# Detect a face and rotate the frame so the eyes sit on a horizontal line
def detect_and_align_faces(frame):
    preds = fa.get_landmarks(frame)
    aligned_face = None
    if preds is not None:
        landmarks = preds[0]
        # 68-point landmarks: 36-41 is the image-left eye, 42-47 the image-right eye
        left_eye = landmarks[36:42]
        right_eye = landmarks[42:48]
        # Angle of the line joining the two eye centers
        left_eye_center = left_eye.mean(axis=0)
        right_eye_center = right_eye.mean(axis=0)
        dY = right_eye_center[1] - left_eye_center[1]
        dX = right_eye_center[0] - left_eye_center[0]
        # dX runs left-to-right and is positive, so no -180 correction is needed
        # (subtracting 180 here would flip the face upside down)
        angle = np.degrees(np.arctan2(dY, dX))
        # Midpoint between the eyes; OpenCV requires a tuple of Python floats
        eyes_center = (float((left_eye_center[0] + right_eye_center[0]) / 2),
                       float((left_eye_center[1] + right_eye_center[1]) / 2))
        # Rotate the frame around the eye midpoint
        M = cv2.getRotationMatrix2D(eyes_center, angle, 1.0)
        aligned_face = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))
    return aligned_face
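# Optional sanity check for the alignment step (a sketch, kept commented out;
# "test.jpg" is a hypothetical local file, and cv2.imread returns BGR, hence
# the conversions):
# img = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)
# aligned = detect_and_align_faces(img)
# if aligned is not None:
#     cv2.imwrite("aligned.jpg", cv2.cvtColor(aligned, cv2.COLOR_RGB2BGR))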
# Expression manipulation (adapt this to your model's actual interface)
def manipulate_expression(face, expression, intensity):
    # Feed the expression label and intensity into the model, e.g.:
    # expression_mapping = {"happy": 0, "sad": 1, "angry": 2,
    #                       "afraid": 3, "disgusted": 4, "surprised": 5}
    # expression_index = expression_mapping[expression]
    # manipulated_face = model(face, expression_index, intensity)
    # Replace the line below with your actual model call
    manipulated_face = face  # placeholder: returns the input unchanged
    return manipulated_face
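# A minimal sketch of the numpy <-> tensor plumbing a real model call would
# need; the 256x256 input size, [0, 1] normalization, and tensor layout are
# assumptions to adapt to the actual checkpoint.
def face_to_tensor(face_rgb, size=256):
    """HxWx3 uint8 RGB image -> 1x3xSxS float tensor in [0, 1]."""
    face_resized = cv2.resize(face_rgb, (size, size))
    tensor = torch.from_numpy(face_resized).permute(2, 0, 1).float() / 255.0
    return tensor.unsqueeze(0)

def tensor_to_face(tensor):
    """1x3xHxW float tensor in [0, 1] -> HxWx3 uint8 RGB image."""
    array = tensor.squeeze(0).permute(1, 2, 0).clamp(0, 1).cpu().numpy()
    return (array * 255).astype(np.uint8)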
def process_frame(frame, expression, intensity):
    # Gradio hands over RGB numpy frames and face_alignment expects RGB,
    # so no BGR <-> RGB conversion is needed on either side
    if frame is None:  # the webcam stream can emit empty frames at startup
        return None
    aligned_face = detect_and_align_faces(frame)
    if aligned_face is not None:
        manipulated_face = manipulate_expression(aligned_face, expression, intensity)
        # TODO: blend the manipulated face back into the original frame;
        # for now the whole frame is replaced by the aligned result
        frame = manipulated_face
    return frame
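# One possible blending step for the TODO above (a sketch, kept commented out;
# the full-frame mask and centered anchor point are assumptions):
# mask = np.full(manipulated_face.shape[:2], 255, dtype=np.uint8)
# center = (frame.shape[1] // 2, frame.shape[0] // 2)
# frame = cv2.seamlessClone(manipulated_face, frame, mask, center, cv2.NORMAL_CLONE)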
# Gradio interface (Gradio 3.x syntax; Gradio 4.x replaced
# source="webcam" with sources=["webcam"])
iface = gr.Interface(
    fn=process_frame,
    inputs=[
        gr.Image(source="webcam", streaming=True, type="numpy"),
        gr.Dropdown(["happy", "sad", "angry", "afraid", "disgusted", "surprised"],
                    label="Expression"),
        gr.Slider(0, 1, label="Intensity"),
    ],
    outputs=gr.Image(type="numpy"),
    title="Deepfake Emotion Control MVP (Real-time)",
    live=True,  # streaming inputs require a live interface
)

iface.launch()
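# Assumed dependencies (versions are a guess; pin to whatever the checkpoint
# and Space were built against):
#   torch, torchvision, opencv-python, numpy, face-alignment, gradio<4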