samuelolubukun committed on
Commit
848d12a
·
verified ·
1 Parent(s): 21e6f49

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +414 -305
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,305 +1,414 @@
1
- import streamlit as st
2
- from PIL import Image, ImageChops, ImageEnhance
3
- import numpy as np
4
- import cv2
5
- import os
6
- import torch
7
- import timm
8
- import cv2 as cv
9
- from mtcnn import MTCNN
10
- from tensorflow.keras.models import load_model
11
- from tensorflow.keras.preprocessing import image as keras_image
12
- from torchvision import transforms
13
- import keras
14
-
15
- # Load models
16
- @st.cache_resource
17
- def load_image_forgery_model():
18
- return load_model("imageforgerydetection.h5")
19
-
20
- @st.cache_resource
21
- def load_deepfake_image_model():
22
- return load_model("deepfake_image_detection.h5")
23
-
24
- @st.cache_resource
25
- def load_video_forgery_model():
26
- return load_model("videoforgerydetection.keras")
27
-
28
- # Constants
29
- IMG_SIZE = 224
30
- MAX_SEQ_LENGTH = 20
31
- NUM_FEATURES = 2048
32
-
33
- @st.cache_resource
34
- def load_deepfake_model():
35
- return load_model('video_classifier_full_model.h5')
36
-
37
- # Load pre-trained models and processor
38
- deepfake_model = load_deepfake_model()
39
- vocabulary2 = np.load('label_processor_vocabulary.npy', allow_pickle=True)
40
- label_processor2 = keras.layers.StringLookup(num_oov_indices=0, vocabulary=vocabulary2.tolist())
41
-
42
-
43
- # Helper functions
44
- # Image Forgery Detection
45
- def convert_to_ela_image(image, quality=90):
46
- temp_filename = 'temp_file_name.jpg'
47
- ela_filename = 'temp_ela.png'
48
-
49
- if image.mode != 'RGB':
50
- image = image.convert('RGB')
51
-
52
- image.save(temp_filename, 'JPEG', quality=quality)
53
- temp_image = Image.open(temp_filename)
54
-
55
- ela_image = ImageChops.difference(image, temp_image)
56
- extrema = ela_image.getextrema()
57
- max_diff = max([ex[1] for ex in extrema])
58
- max_diff = max_diff if max_diff != 0 else 1
59
- scale = 255.0 / max_diff
60
-
61
- ela_image = ImageEnhance.Brightness(ela_image).enhance(scale)
62
- return ela_image
63
-
64
- def prepare_image_for_forgery(image):
65
- ela_image = convert_to_ela_image(image, 90).resize((128, 128))
66
- return np.array(ela_image).flatten() / 255.0
67
-
68
- # Deepfake Image Detection
69
- def predict_deepfake_image(image_path, model):
70
- img = keras_image.load_img(image_path, target_size=(256, 256))
71
- img_array = keras_image.img_to_array(img) / 255.0
72
- img_array = np.expand_dims(img_array, axis=0)
73
- prediction = model.predict(img_array)
74
- return 'Real' if prediction[0] > 0.5 else 'Fake'
75
-
76
- # Video Forgery Detection
77
- target_height, target_width = 240, 320 # Define target dimensions (height, width)
78
-
79
- def predict_video_forgery(video_path, model):
80
- vid = []
81
- sumframes = 0
82
- cap = cv2.VideoCapture(video_path)
83
-
84
- while cap.isOpened():
85
- ret, frame = cap.read()
86
- if not ret:
87
- break
88
-
89
- # Resize frame to target dimensions
90
- frame = cv2.resize(frame, (target_width, target_height))
91
-
92
- sumframes += 1
93
- vid.append(frame)
94
-
95
- cap.release()
96
- st.write(f"No. Of Frames in the Video: {sumframes}")
97
-
98
- Xtest = np.array(vid)
99
- output = model.predict(Xtest)
100
- output = output.reshape((-1))
101
-
102
- results = []
103
- for i in output:
104
- if i>0.5:
105
- results.append(1)
106
- else:
107
- results.append(0)
108
- #print(len(results))
109
- #print(results)
110
- forge_flag = 0
111
- for i in results:
112
- if i == 1:
113
- forge_flag = 1
114
- break
115
-
116
- forge_flag = any(results)
117
-
118
- if forge_flag == 0:
119
- return "The video is not forged", 0, sumframes
120
- else:
121
- return "The video is forged", sum(results), sumframes
122
-
123
-
124
- # Deepfake Video Detection
125
- def build_feature_extractor():
126
- feature_extractor = keras.applications.InceptionV3(
127
- weights="imagenet",
128
- include_top=False,
129
- pooling="avg",
130
- input_shape=(IMG_SIZE, IMG_SIZE, 3),
131
- )
132
- preprocess_input = keras.applications.inception_v3.preprocess_input
133
-
134
- inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
135
- preprocessed = preprocess_input(inputs)
136
- outputs = feature_extractor(preprocessed)
137
- return keras.Model(inputs, outputs, name="feature_extractor")
138
-
139
- feature_extractor = build_feature_extractor()
140
- detector = MTCNN()
141
-
142
- def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE), skip_frames=2):
143
- cap = cv.VideoCapture(path)
144
- frames = []
145
- frame_count = 0
146
- previous_box = None
147
-
148
- while True:
149
- ret, frame = cap.read()
150
- if not ret:
151
- break
152
-
153
- if frame_count % skip_frames == 0:
154
- frame, previous_box = get_face_region_first_frame(frame, previous_box)
155
- if frame is not None:
156
- frame = cv.resize(frame, resize)
157
- frame = frame[:, :, [2, 1, 0]]
158
- frames.append(frame)
159
-
160
- if len(frames) == max_frames:
161
- break
162
- frame_count += 1
163
-
164
- while len(frames) < max_frames and frames:
165
- frames.append(frames[-1])
166
-
167
- cap.release()
168
- return np.array(frames)
169
-
170
- def get_face_region_first_frame(frame, previous_box=None):
171
- if previous_box is None:
172
- detections = detector.detect_faces(frame)
173
- if detections:
174
- x, y, width, height = detections[0]['box']
175
- previous_box = (x, y, width, height)
176
- else:
177
- return None, None
178
- else:
179
- x, y, width, height = previous_box
180
-
181
- face_region = frame[y:y+height, x:x+width]
182
- return face_region, previous_box
183
-
184
- def prepare_single_video(frames):
185
- frames = frames[None, ...]
186
- frame_mask = np.zeros(shape=(1, MAX_SEQ_LENGTH,), dtype="bool")
187
- frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")
188
-
189
- for i, batch in enumerate(frames):
190
- video_length = batch.shape[0]
191
- length = min(MAX_SEQ_LENGTH, video_length)
192
- for j in range(length):
193
- frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])
194
- frame_mask[i, :length] = 1
195
-
196
- return frame_features, frame_mask
197
-
198
- def sequence_prediction(video_path):
199
- class_vocab = label_processor2.get_vocabulary()
200
- frames = load_video(video_path)
201
- if len(frames) == 0:
202
- st.error("Could not process video. Please try another file.")
203
- return None
204
-
205
- frame_features, frame_mask = prepare_single_video(frames)
206
- probabilities = deepfake_model.predict([frame_features, frame_mask])[0]
207
-
208
- predictions = {class_vocab[i]: probabilities[i] * 100 for i in np.argsort(probabilities)[::-1]}
209
- return predictions
210
-
211
-
212
- # Streamlit App
213
- st.title("Fraudulent Image and Video Detection System")
214
-
215
- # Sidebar for model selection
216
- task = st.sidebar.selectbox("Choose a detection task:", [
217
- "Image Forgery Detection",
218
- "Deepfake Image Detection",
219
- "Video Forgery Detection",
220
- "Deepfake Video Detection"
221
- ])
222
-
223
- if task == "Image Forgery Detection":
224
- uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
225
- if uploaded_file:
226
- image = Image.open(uploaded_file)
227
- st.image(image, caption="Uploaded Image", use_container_width=True)
228
-
229
- prepared_image = prepare_image_for_forgery(image).reshape(-1, 128, 128, 3)
230
- model = load_image_forgery_model()
231
- prediction = model.predict(prepared_image)
232
- confidence_real = prediction[0][1] * 100
233
- confidence_fake = prediction[0][0] * 100
234
-
235
- if confidence_real > confidence_fake:
236
- st.success(f"Result: Real Image with {confidence_real:.2f}% confidence")
237
- else:
238
- st.error(f"Result: Forged Image with {confidence_fake:.2f}% confidence")
239
-
240
- elif task == "Deepfake Image Detection":
241
- uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
242
- if uploaded_file:
243
- with open("temp_image.jpg", "wb") as f:
244
- f.write(uploaded_file.getbuffer())
245
-
246
- st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
247
- model = load_deepfake_image_model()
248
- result = predict_deepfake_image("temp_image.jpg", model)
249
-
250
- if result == 'Real':
251
- st.success("Prediction: Real")
252
- else:
253
- st.error("Prediction: Fake")
254
-
255
- os.remove("temp_image.jpg")
256
-
257
- if task == "Video Forgery Detection":
258
- uploaded_file = st.file_uploader("Upload a video", type=['mp4', 'avi', 'mov', 'mkv'])
259
- if uploaded_file:
260
- with open("temp_video.mp4", "wb") as f:
261
- f.write(uploaded_file.getbuffer())
262
-
263
- st.video("temp_video.mp4")
264
- st.write("Analyzing the video for forgery...")
265
-
266
- model = load_video_forgery_model()
267
- result_message, forged_frames, total_frames = predict_video_forgery("temp_video.mp4", model)
268
-
269
- if forged_frames == 0:
270
- st.success(result_message)
271
- else:
272
- st.error(result_message)
273
-
274
- st.write(f"Forged Frames: {forged_frames}/{total_frames}")
275
- os.remove("temp_video.mp4")
276
-
277
-
278
- elif task == "Deepfake Video Detection":
279
- uploaded_file = st.file_uploader("Upload a video", type=["mp4", "avi", "mov"])
280
- if uploaded_file is not None:
281
- with open("temp_video.mp4", "wb") as f:
282
- f.write(uploaded_file.read())
283
-
284
- st.video("temp_video.mp4")
285
- st.write("Analyzing the video...")
286
-
287
- frames = load_video("temp_video.mp4")
288
- if len(frames) == 0:
289
- st.error("Could not process video. Please try another file.")
290
- else:
291
- frame_features, frame_mask = prepare_single_video(frames)
292
- probabilities = deepfake_model.predict([frame_features, frame_mask])[0]
293
-
294
- predictions = {label_processor2.get_vocabulary()[i]: probabilities[i] * 100 for i in np.argsort(probabilities)[::-1]}
295
-
296
- if predictions:
297
- highest_label = max(predictions, key=predictions.get)
298
- highest_prob = predictions[highest_label]
299
-
300
- if highest_label.lower() == "real":
301
- st.success(f"The video is real with a confidence of {highest_prob:.2f}%.")
302
- elif highest_label.lower() == "fake":
303
- st.error(f"This video is a deepfake with a confidence of {highest_prob:.2f}%.")
304
- else:
305
- st.warning(f"Uncertain prediction: {highest_label} with {highest_prob:.2f}% confidence.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from PIL import Image, ImageChops, ImageEnhance
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ import cv2
6
+ import os
7
+ import cv2 as cv
8
+ from mtcnn import MTCNN
9
+ from tensorflow.keras.models import load_model
10
+ from tensorflow.keras.preprocessing import image as keras_image
11
+ import keras
12
+
13
+ # Load models
14
@st.cache_resource
def load_image_forgery_model():
    """Load and cache the ELA-based image forgery classifier (cached per session server)."""
    model = load_model("imageforgerydetection.h5")
    return model
17
+
18
@st.cache_resource
def load_deepfake_image_model():
    """Load and cache the deepfake image classifier."""
    model = load_model("deepfake_image_detection.h5")
    return model
21
+
22
@st.cache_resource
def load_video_forgery_model():
    """Load and cache the per-frame video forgery classifier."""
    model = load_model("videoforgerydetection.keras")
    return model
25
+
26
+ # Constants
27
+ IMG_SIZE = 224
28
+ MAX_SEQ_LENGTH = 20
29
+ NUM_FEATURES = 2048
30
+
31
@st.cache_resource
def load_deepfake_model():
    """Load and cache the sequence-based deepfake video classifier."""
    model = load_model('video_classifier_full_model.h5')
    return model
34
+
35
# Load pre-trained models and processor
deepfake_model = load_deepfake_model()
# Label vocabulary saved at training time; StringLookup maps label strings to
# the indices the sequence model was trained with (no out-of-vocabulary slot).
vocabulary2 = np.load('label_processor_vocabulary.npy', allow_pickle=True)
label_processor2 = keras.layers.StringLookup(num_oov_indices=0, vocabulary=vocabulary2.tolist())
39
+
40
+
41
+ # Helper functions
42
+ # Image Forgery Detection
43
def convert_to_ela_image(image, quality=90):
    """Compute an Error Level Analysis (ELA) image.

    The image is re-saved as JPEG at the given quality and the per-pixel
    difference against the original is brightness-amplified so compression
    artifacts (and hence likely edits) become visible.

    Args:
        image: PIL.Image to analyze.
        quality: JPEG re-compression quality (default 90).

    Returns:
        A brightness-scaled PIL.Image holding the ELA difference.
    """
    import io  # local import keeps this fix self-contained

    if image.mode != 'RGB':
        image = image.convert('RGB')

    # Re-compress in memory instead of writing a temp file to disk: the old
    # 'temp_file_name.jpg' was never deleted and raced between concurrent
    # Streamlit sessions (the unused 'temp_ela.png' name is dropped too).
    buffer = io.BytesIO()
    image.save(buffer, 'JPEG', quality=quality)
    buffer.seek(0)
    recompressed = Image.open(buffer)

    ela_image = ImageChops.difference(image, recompressed)
    extrema = ela_image.getextrema()
    max_diff = max(ex[1] for ex in extrema)
    max_diff = max_diff if max_diff != 0 else 1  # avoid division by zero on identical images
    scale = 255.0 / max_diff

    return ImageEnhance.Brightness(ela_image).enhance(scale)
61
+
62
def prepare_image_for_forgery(image):
    """Convert *image* to its ELA form and flatten it into a [0, 1] feature vector."""
    ela = convert_to_ela_image(image, 90)
    ela = ela.resize((128, 128))
    return np.asarray(ela).flatten() / 255.0
65
+
66
+ # Deepfake Image Detection
67
def predict_deepfake_image(image_path, model):
    """Classify the image at *image_path* as 'Real' or 'Fake' using *model*.

    The image is resized to 256x256, scaled to [0, 1] and fed as a batch of
    one; a score above 0.5 is treated as 'Real'.
    """
    loaded = keras_image.load_img(image_path, target_size=(256, 256))
    batch = np.expand_dims(keras_image.img_to_array(loaded) / 255.0, axis=0)
    score = model.predict(batch)
    if score[0] > 0.5:
        return 'Real'
    return 'Fake'
73
+
74
# Video Forgery Detection
# Configuration
# Frames are resized to these dimensions before CNN inference.
target_height, target_width = 240, 320
# Max count of differing pixels for two consecutive frames to be considered a
# suspicious freeze/duplicate (used by analyze_video_tampering).
threshold = 30  # Threshold for freeze/duplicate detection
78
+
79
def predict_video_forgery_cnn(video_path, model):
    """CNN-based video forgery detection.

    Reads every frame of the video, resizes it to the model's expected
    (target_height, target_width) input, and classifies each frame.

    Args:
        video_path: Path to the video file.
        model: Keras model producing one forgery probability per frame.

    Returns:
        Tuple (is_forged, forged_frames, total_frames).
    """
    frames = []
    cap = cv2.VideoCapture(video_path)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Resize frame to the model's input dimensions.
        frames.append(cv2.resize(frame, (target_width, target_height)))

    cap.release()

    total_frames = len(frames)
    if total_frames == 0:
        # Unreadable/empty video: report "not forged" with zero frames.
        return False, 0, 0

    output = model.predict(np.array(frames)).reshape(-1)

    # Single pass over the predictions (the old code scanned the output
    # twice, once for the count and once for the any() flag).
    forged_frames = int(np.count_nonzero(output > 0.5))
    is_forged = forged_frames > 0

    return is_forged, forged_frames, total_frames
109
+
110
def analyze_video_tampering(video_path):
    """Frame difference analysis for tampering detection"""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return False, [], []

    differences = []
    suspects = []
    previous_gray = None

    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if previous_gray is not None:
            # Number of pixels that changed since the previous frame.
            changed = np.count_nonzero(cv2.absdiff(gray, previous_gray))
            differences.append(changed)

            # Nearly identical consecutive frames suggest a freeze/duplicate.
            if changed < threshold:
                suspects.append(int(cap.get(cv2.CAP_PROP_POS_FRAMES)))

        previous_gray = gray

    cap.release()

    # Simple rule: the video is flagged as tampered if any frame is suspected.
    return len(suspects) > 0, differences, suspects
144
+
145
def plot_frame_analysis(frame_differences):
    """Plot per-frame pixel differences against the freeze-detection threshold.

    Args:
        frame_differences: Sequence of changed-pixel counts per frame pair.

    Returns:
        The matplotlib Figure holding the plot. (The old version returned the
        pyplot module; st.pyplot(<module>) is deprecated in Streamlit, and the
        caller passes the return value straight to st.pyplot.)
    """
    fig = plt.figure(figsize=(10, 4))
    plt.plot(frame_differences, color='blue', linewidth=1)
    plt.axhline(y=threshold, color='red', linestyle='--', label=f"Threshold ({threshold})")
    plt.xlabel("Frame Number")
    plt.ylabel("Pixel Differences")
    plt.title("Frame Difference Analysis")
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Overlay summary statistics in the top-left corner.
    if frame_differences:
        mean_val = np.mean(frame_differences)
        std_val = np.std(frame_differences)
        plt.text(0.02, 0.98, f"Mean: {mean_val:.1f}\nStd: {std_val:.1f}",
                 transform=plt.gca().transAxes, verticalalignment='top',
                 bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))

    return fig
165
+
166
def combined_video_forgery_detection(video_path, model):
    """Combined detection using both CNN and frame analysis"""
    # Run the two independent detectors over the same file.
    cnn_forged, cnn_forged_frames, total_frames = predict_video_forgery_cnn(video_path, model)
    frame_tampered, frame_differences, suspected_frames = analyze_video_tampering(video_path)

    results = {
        'cnn_forged': cnn_forged,
        'cnn_forged_frames': cnn_forged_frames,
        'frame_tampered': frame_tampered,
        'suspected_frames': len(suspected_frames),
        'total_frames': total_frames,
        'frame_differences': frame_differences,
    }

    # Verdict logic: agreement between the two methods raises confidence.
    if cnn_forged and frame_tampered:
        verdict, confidence = "FORGED - Detected by both CNN and Frame Analysis", "High"
    elif cnn_forged:
        verdict, confidence = "FORGED - Detected by CNN", "Medium"
    elif frame_tampered:
        verdict, confidence = "FORGED - Detected by Frame Analysis", "Medium"
    else:
        verdict, confidence = "NOT TAMPERED - No Forgery detected", "High"

    return verdict, confidence, results
200
+
201
+ # Deepfake Video Detection
202
def build_feature_extractor():
    """Wrap a pretrained InceptionV3 (avg-pooled, headless) behind its preprocessing step."""
    backbone = keras.applications.InceptionV3(
        weights="imagenet",
        include_top=False,
        pooling="avg",
        input_shape=(IMG_SIZE, IMG_SIZE, 3),
    )

    inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
    # Apply the InceptionV3-specific input scaling, then the backbone.
    features = backbone(keras.applications.inception_v3.preprocess_input(inputs))
    return keras.Model(inputs, features, name="feature_extractor")
215
+
216
# Built once at import time: InceptionV3 feature extractor and MTCNN face detector.
feature_extractor = build_feature_extractor()
detector = MTCNN()
218
+
219
def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE), skip_frames=2):
    """Extract face crops from a video.

    Every ``skip_frames``-th frame is face-cropped (reusing the first detected
    box for subsequent frames), resized, and converted BGR -> RGB.

    Args:
        path: Video file path.
        max_frames: Stop after collecting this many frames; 0 means no limit.
        resize: Output (width, height) passed to cv.resize.
        skip_frames: Sampling stride over the input frames.

    Returns:
        np.ndarray of collected RGB frames (possibly empty).
    """
    cap = cv.VideoCapture(path)
    frames = []
    frame_count = 0
    previous_box = None

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if frame_count % skip_frames == 0:
            frame, previous_box = get_face_region_first_frame(frame, previous_box)
            if frame is not None:
                frame = cv.resize(frame, resize)
                frame = frame[:, :, [2, 1, 0]]  # BGR -> RGB
                frames.append(frame)

            # BUG FIX: with the default max_frames=0 the old check
            # `len(frames) == max_frames` fired while `frames` was still
            # empty (e.g. no face in the first sampled frame), aborting the
            # whole video. Treat 0 as "unlimited".
            if max_frames and len(frames) == max_frames:
                break
        frame_count += 1

    # Pad by repeating the last frame up to max_frames (only when a limit was given).
    while len(frames) < max_frames and frames:
        frames.append(frames[-1])

    cap.release()
    return np.array(frames)
246
+
247
def get_face_region_first_frame(frame, previous_box=None):
    """Crop the face region from *frame*.

    On the first call (``previous_box is None``) MTCNN locates the face and
    the box is remembered; later calls reuse that box so detection runs once.

    Args:
        frame: Image array (H, W, C).
        previous_box: Optional (x, y, width, height) from a previous frame.

    Returns:
        (face_region, box) on success, or (None, None) when no face is found.
    """
    if previous_box is None:
        detections = detector.detect_faces(frame)
        if not detections:
            return None, None
        x, y, width, height = detections[0]['box']
    else:
        x, y, width, height = previous_box

    # BUG FIX: MTCNN may report slightly negative x/y; a negative start index
    # wraps around in NumPy slicing and crops the wrong region. Clamp to 0.
    x, y = max(x, 0), max(y, 0)

    face_region = frame[y:y + height, x:x + width]
    return face_region, (x, y, width, height)
260
+
261
def prepare_single_video(frames):
    """Turn raw frames into (features, mask) inputs for the sequence model.

    Args:
        frames: Array of RGB frames, shape (num_frames, H, W, 3).

    Returns:
        (frame_features, frame_mask) with shapes
        (1, MAX_SEQ_LENGTH, NUM_FEATURES) and (1, MAX_SEQ_LENGTH).
    """
    frames = frames[None, ...]
    frame_mask = np.zeros(shape=(1, MAX_SEQ_LENGTH,), dtype="bool")
    frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")

    for i, batch in enumerate(frames):
        length = min(MAX_SEQ_LENGTH, batch.shape[0])
        if length:
            # One batched predict call over all used frames instead of one
            # call per frame (much faster; inference outputs are identical).
            frame_features[i, :length] = feature_extractor.predict(batch[:length])
            frame_mask[i, :length] = 1

    return frame_features, frame_mask
274
+
275
def sequence_prediction(video_path):
    """Run the deepfake sequence model on a video; return {label: confidence %} or None."""
    frames = load_video(video_path)
    if len(frames) == 0:
        st.error("Could not process video. Please try another file.")
        return None

    frame_features, frame_mask = prepare_single_video(frames)
    probabilities = deepfake_model.predict([frame_features, frame_mask])[0]

    # Rank labels by descending probability.
    class_vocab = label_processor2.get_vocabulary()
    ranked = np.argsort(probabilities)[::-1]
    return {class_vocab[i]: probabilities[i] * 100 for i in ranked}
287
+
288
+
289
# Streamlit App
st.title("Fraudulent Image and Video Detection System")

# Sidebar for model selection; the chosen value drives the task dispatch below.
task = st.sidebar.selectbox("Choose a detection task:", [
    "Image Forgery Detection",
    "Deepfake Image Detection",
    "Video Forgery Detection",
    "Deepfake Video Detection"
])
299
+
300
if task == "Image Forgery Detection":
    uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
    if uploaded_file:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_container_width=True)

        # ELA-preprocess the upload and classify it; index 0 of the model
        # output is treated as "fake", index 1 as "real".
        model = load_image_forgery_model()
        prepared_image = prepare_image_for_forgery(image).reshape(-1, 128, 128, 3)
        prediction = model.predict(prepared_image)
        confidence_fake = prediction[0][0] * 100
        confidence_real = prediction[0][1] * 100

        if confidence_real > confidence_fake:
            st.success(f"Result: Real Image with {confidence_real:.2f}% confidence")
        else:
            st.error(f"Result: Forged Image with {confidence_fake:.2f}% confidence")
316
+
317
+ elif task == "Deepfake Image Detection":
318
+ uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
319
+ if uploaded_file:
320
+ with open("temp_image.jpg", "wb") as f:
321
+ f.write(uploaded_file.getbuffer())
322
+
323
+ st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
324
+ model = load_deepfake_image_model()
325
+ result = predict_deepfake_image("temp_image.jpg", model)
326
+
327
+ if result == 'Real':
328
+ st.success("Prediction: Real")
329
+ else:
330
+ st.error("Prediction: Fake")
331
+
332
+ os.remove("temp_image.jpg")
333
+
334
+ if task == "Video Forgery Detection":
335
+ uploaded_file = st.file_uploader("Upload a video", type=['mp4', 'avi', 'mov', 'mkv'])
336
+
337
+ if uploaded_file:
338
+ # Save uploaded file
339
+ with open("temp_video.mp4", "wb") as f:
340
+ f.write(uploaded_file.getbuffer())
341
+
342
+ st.video("temp_video.mp4")
343
+ st.write("Analyzing the video for forgery...")
344
+
345
+ # Load model and run combined detection
346
+ model = load_video_forgery_model()
347
+ verdict, confidence, results = combined_video_forgery_detection("temp_video.mp4", model)
348
+
349
+ # Display results
350
+ if "FORGED" in verdict:
351
+ st.error(f"🚨 {verdict}")
352
+ else:
353
+ st.success(f"✅ {verdict}")
354
+
355
+ st.write(f"**Confidence Level:** {confidence}")
356
+
357
+ # Show detailed results
358
+ col1, col2 = st.columns(2)
359
+
360
+ with col1:
361
+ st.write("**CNN Analysis:**")
362
+ if results['cnn_forged']:
363
+ st.write(f"- Status: Forged ❌")
364
+ st.write(f"- Forged Frames: {results['cnn_forged_frames']}/{results['total_frames']}")
365
+ else:
366
+ st.write(f"- Status: Not Forged ✅")
367
+
368
+ with col2:
369
+ st.write("**Frame Analysis:**")
370
+ if results['frame_tampered']:
371
+ st.write(f"- Status: Tampered ❌")
372
+ st.write(f"- Suspected Frames: {results['suspected_frames']}")
373
+ else:
374
+ st.write(f"- Status: Not Tampered ✅")
375
+
376
+ # Plot frame differences if available
377
+ if results['frame_differences']:
378
+ st.write("**Frame Difference Analysis:**")
379
+ fig = plot_frame_analysis(results['frame_differences'])
380
+ st.pyplot(fig)
381
+ plt.close()
382
+
383
+ # Cleanup
384
+ os.remove("temp_video.mp4")
385
+
386
+
387
+ elif task == "Deepfake Video Detection":
388
+ uploaded_file = st.file_uploader("Upload a video", type=["mp4", "avi", "mov"])
389
+ if uploaded_file is not None:
390
+ with open("temp_video.mp4", "wb") as f:
391
+ f.write(uploaded_file.read())
392
+
393
+ st.video("temp_video.mp4")
394
+ st.write("Analyzing the video...")
395
+
396
+ frames = load_video("temp_video.mp4")
397
+ if len(frames) == 0:
398
+ st.error("Could not process video. Please try another file.")
399
+ else:
400
+ frame_features, frame_mask = prepare_single_video(frames)
401
+ probabilities = deepfake_model.predict([frame_features, frame_mask])[0]
402
+
403
+ predictions = {label_processor2.get_vocabulary()[i]: probabilities[i] * 100 for i in np.argsort(probabilities)[::-1]}
404
+
405
+ if predictions:
406
+ highest_label = max(predictions, key=predictions.get)
407
+ highest_prob = predictions[highest_label]
408
+
409
+ if highest_label.lower() == "real":
410
+ st.success(f"The video is real with a confidence of {highest_prob:.2f}%.")
411
+ elif highest_label.lower() == "fake":
412
+ st.error(f"This video is a deepfake with a confidence of {highest_prob:.2f}%.")
413
+ else:
414
+ st.warning(f"Uncertain prediction: {highest_label} with {highest_prob:.2f}% confidence.")
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
  streamlit
2
  pillow
3
  numpy<2
 
4
  opencv-python-headless==4.8.0.74
5
  torch
6
  timm
 
1
  streamlit
2
  pillow
3
  numpy<2
4
+ matplotlib
5
  opencv-python-headless==4.8.0.74
6
  torch
7
  timm