woojinSong committed
Commit 9e54219 · 1 Parent(s): 1639892

Create app.py

Files changed (1)
  1. app.py +98 -0
app.py ADDED
@@ -0,0 +1,98 @@
+ import gradio as gr
+ import cv2
+ import librosa
+ import numpy as np
+ import os
+ import json
+ import pickle
+ import shutil
+ import pandas as pd
+ from pathlib import Path
+ from scipy.io import wavfile
+ from moviepy.editor import VideoFileClip
+ from keras.models import load_model
+ from sklearn.preprocessing import LabelEncoder
+
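+ # The trained multimodal model used by detect_deepfake below has to be loaded here;
+ # the checkpoint file name is a placeholder, not the original file.
+ multimodal_model = load_model("multimodal_model.h5")
+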
+ # Data preprocessing
+ def preprocess_video(video_path):
+     face_cascade = cv2.CascadeClassifier('/content/drive/Shareddrives/23 인공지능 모델링_돌핀/haarcascade_frontalface_default.xml')
+     cnn_data = []
+     rnn_data = []
+
+     # Collect face crops from the video frames for the CNN branch
+     cap = cv2.VideoCapture(video_path)
+     count = 0
+     while len(cnn_data) < 2:
+         ret, frame = cap.read()
+         if ret:
+             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+             faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
+             for (x, y, w, h) in faces:
+                 face_img = gray[y:y+h, x:x+w]
+                 resized_img = cv2.resize(face_img, (224, 224))
+                 cnn_data.append(resized_img)
+                 count += 1
+                 if count >= 15:
+                     break
+         else:
+             break
+
+     # Extract MFCC features from the audio track for the RNN branch
+     if len(cnn_data) < 281:
+         video_clip = VideoFileClip(video_path)
+         audio_clip = video_clip.audio
+         audio_clip.write_audiofile("audio.wav")
+         y, sr = librosa.load("audio.wav", sr=44100)
+         mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20)
+         mfcc = mfcc[:, :400]
+         rnn_data.append(mfcc)
+         os.remove("audio.wav")
+
+     cnn_data = np.array(cnn_data)
+     rnn_data = np.array(rnn_data)
+     return cnn_data, rnn_data
+
+ # Determine whether the video is a deepfake
+ def detect_deepfake(video_path):
+     cnn_data, rnn_data = preprocess_video(video_path)
+
+     cnn_data_np = np.array(cnn_data)
+     rnn_data_np = np.array(rnn_data)
+
+     def augment_data(data, target_size):
+         # Initialize the augmented data array
+         augmented_data = np.empty((target_size,) + data.shape[1:])
+
+         # Copy the RNN data, flipped, until the target size is reached
+         for i in range(target_size):
+             augmented_data[i] = np.flip(data[i % data.shape[0]], axis=0)
+
+         return augmented_data
+
+     # Augment the RNN data so both branches have the same number of samples
+     augmented_rnn_data = augment_data(rnn_data_np, cnn_data_np.shape[0])
+
+     y_pred = multimodal_model.predict([cnn_data, augmented_rnn_data])
+
+     #print(y_pred)
+     max_prob = np.max(y_pred)
+     print(max_prob)
+
+     if max_prob < 0.5:
+         result = "Deepfake"
+     else:
+         result = "Real"
+     return result
+
+ iface = gr.Interface(
+     fn=detect_deepfake,
+     inputs="video",
+     outputs="text",
+     title="Video Deepfake Detection",
+     description="Upload a video to check if it contains deepfake content.",
+     allow_flagging=False,
+     analytics_enabled=False
+ )
+
+ iface.launch()
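
For a quick local sanity check of the prediction path (with the iface.launch() call temporarily commented out, since importing app.py would otherwise start the server), a minimal sketch assuming a local test clip named sample_video.mp4:

    from app import detect_deepfake
    print(detect_deepfake("sample_video.mp4"))  # prints "Deepfake" or "Real"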