import streamlit as st
from PIL import Image, ImageChops, ImageEnhance, ImageDraw, ImageFilter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import feature
import io
import cv2
import cv2 as cv  # some of the video helpers below use the `cv` alias
import os
from mtcnn import MTCNN
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image as keras_image
import keras
# Load models
@st.cache_resource
def load_image_forgery_model():
return load_model("imageforgerydetection.h5")
@st.cache_resource
def load_deepfake_image_model():
return load_model("deepfake_image_detection.h5")
@st.cache_resource
def load_video_forgery_model():
return load_model("videoforgerydetection.keras")
# Constants
IMG_SIZE = 224
MAX_SEQ_LENGTH = 20
NUM_FEATURES = 2048
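# IMG_SIZE matches the input resolution of the InceptionV3 feature extractor
# built below; MAX_SEQ_LENGTH is the number of frames fed to the sequence
# model, and NUM_FEATURES (2048) is the size of InceptionV3's average-pooled
# output vector.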
@st.cache_resource
def load_deepfake_model():
return load_model('video_classifier_full_model.h5')
# Load pre-trained models and processor
deepfake_model = load_deepfake_model()
vocabulary2 = np.load('label_processor_vocabulary.npy', allow_pickle=True)
label_processor2 = keras.layers.StringLookup(num_oov_indices=0, vocabulary=vocabulary2.tolist())
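# Rebuilding the StringLookup layer from the saved vocabulary lets the
# sequence model's output indices be mapped back to human-readable class
# names (whatever labels the vocabulary was saved with, e.g. real/fake).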
# Helper functions
# Image Forgery Detection Functions
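# Error Level Analysis (ELA): re-save the image as JPEG at a known quality and
# take the pixel-wise difference with the original. Regions edited after the
# last save tend to recompress differently and therefore appear brighter.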
def convert_to_ela_image(image, quality=90):
    temp_filename = 'temp_file_name.jpg'
if image.mode != 'RGB':
image = image.convert('RGB')
image.save(temp_filename, 'JPEG', quality=quality)
temp_image = Image.open(temp_filename)
ela_image = ImageChops.difference(image, temp_image)
extrema = ela_image.getextrema()
max_diff = max([ex[1] for ex in extrema])
max_diff = max_diff if max_diff != 0 else 1
scale = 255.0 / max_diff
ela_image = ImageEnhance.Brightness(ela_image).enhance(scale)
return ela_image
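# The ELA image is resized to 128x128 and flattened; it is reshaped back to
# (128, 128, 3) at prediction time to match the forgery CNN's input.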
def prepare_image_for_forgery(image):
ela_image = convert_to_ela_image(image, 90).resize((128, 128))
return np.array(ela_image).flatten() / 255.0
# Advanced Analysis Functions
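# Copy-move detection pipeline: slide overlapping 16x16 blocks over the
# grayscale image, describe each block with raw pixels, DCT coefficients and
# an LBP texture histogram, then compare block pairs using correlation, a
# simplified SSIM and cosine similarity. Pairs that match strongly but lie
# far apart are reported as candidate copy-move regions; near-duplicate
# matches are filtered out afterwards.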
def detect_copy_move_forgery_advanced(image):
"""Advanced copy-move forgery detection using multiple techniques"""
# Convert to grayscale
gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
# Parameters
block_size = 16
overlap_threshold = 8
correlation_threshold = 0.85
min_distance = 32 # Minimum distance between blocks to avoid self-matching
h, w = gray.shape
matches = []
# Extract overlapping blocks with their descriptors
blocks = []
positions = []
for i in range(0, h - block_size, overlap_threshold):
for j in range(0, w - block_size, overlap_threshold):
block = gray[i:i+block_size, j:j+block_size]
# Multiple feature descriptors for better matching
# 1. Raw block data
block_flat = block.flatten()
# 2. DCT coefficients (frequency domain)
dct_block = cv2.dct(np.float32(block))
dct_flat = dct_block.flatten()
# 3. LBP (Local Binary Pattern) for texture
lbp = feature.local_binary_pattern(block, 8, 1, method='uniform')
lbp_hist, _ = np.histogram(lbp.ravel(), bins=10, range=(0, 10))
# Combine features
descriptor = np.concatenate([
block_flat / 255.0, # Normalized pixel values
dct_flat / np.max(np.abs(dct_flat)) if np.max(np.abs(dct_flat)) > 0 else dct_flat, # Normalized DCT
lbp_hist / np.sum(lbp_hist) if np.sum(lbp_hist) > 0 else lbp_hist # LBP histogram
])
blocks.append(descriptor)
positions.append((j + block_size//2, i + block_size//2))
# Advanced matching using multiple similarity metrics
for idx1, (block1, pos1) in enumerate(zip(blocks, positions)):
for idx2, (block2, pos2) in enumerate(zip(blocks[idx1+1:], positions[idx1+1:]), idx1+1):
# Skip if blocks are too close (likely same region)
distance = np.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)
if distance < min_distance:
continue
# Multiple similarity measures
# 1. Normalized Cross Correlation
correlation = np.corrcoef(block1[:block_size*block_size], block2[:block_size*block_size])[0, 1]
# 2. Structural Similarity (simplified)
ssim = 1 - np.mean((block1 - block2)**2) / (np.var(block1) + np.var(block2) + 1e-10)
# 3. Cosine similarity
cosine_sim = np.dot(block1, block2) / (np.linalg.norm(block1) * np.linalg.norm(block2) + 1e-10)
# Combined similarity score
combined_score = (correlation + ssim + cosine_sim) / 3
if combined_score > correlation_threshold and not np.isnan(combined_score):
matches.append((pos1, pos2, combined_score))
# Post-processing: Remove duplicate matches and cluster nearby matches
filtered_matches = []
for match in sorted(matches, key=lambda x: x[2], reverse=True):
pos1, pos2, score = match
# Check if this match is too similar to existing ones
is_duplicate = False
for existing_match in filtered_matches:
existing_pos1, existing_pos2, _ = existing_match
if (abs(pos1[0] - existing_pos1[0]) < 20 and abs(pos1[1] - existing_pos1[1]) < 20 and
abs(pos2[0] - existing_pos2[0]) < 20 and abs(pos2[1] - existing_pos2[1]) < 20):
is_duplicate = True
break
if not is_duplicate:
filtered_matches.append(match)
if len(filtered_matches) >= 20: # Limit to top 20 matches
break
return filtered_matches
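# Splicing heuristic: Canny and Sobel respond to edges differently, and their
# responses tend to diverge most where content from another image was pasted
# in. Regions of strong disagreement are flagged; this is a rough cue rather
# than a definitive test.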
def detect_splicing_regions(image):
"""Detect potential splicing/tampering regions using edge inconsistencies"""
# Convert to grayscale
gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
# Apply different edge detection methods
edges_canny = cv2.Canny(gray, 50, 150)
edges_sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 1, ksize=3)
edges_sobel = np.uint8(np.absolute(edges_sobel))
# Find inconsistent regions
diff = cv2.absdiff(edges_canny, edges_sobel)
# Threshold and find contours
_, thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Filter contours by area
suspicious_regions = []
for contour in contours:
area = cv2.contourArea(contour)
if area > 100: # Minimum area threshold
x, y, w, h = cv2.boundingRect(contour)
suspicious_regions.append((x, y, w, h))
return suspicious_regions
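# A camera sensor usually leaves roughly uniform noise across a photo, so a
# block-wise variance map of the high-frequency residual (image minus its
# Gaussian blur) can expose patches whose noise statistics differ from the
# rest of the image.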
def analyze_noise_patterns(image):
"""Analyze noise patterns to detect inconsistencies"""
# Convert to grayscale
gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
# Apply Gaussian blur and subtract to get noise
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
noise = cv2.absdiff(gray, blurred)
# Divide image into blocks and calculate noise variance
block_size = 32
h, w = gray.shape
noise_map = np.zeros((h//block_size, w//block_size))
for i in range(0, h - block_size, block_size):
for j in range(0, w - block_size, block_size):
block = noise[i:i+block_size, j:j+block_size]
noise_map[i//block_size, j//block_size] = np.var(block)
return noise_map, noise
# Individual Analysis Functions
def create_ela_analysis(image):
"""Create ELA analysis visualization"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle('Error Level Analysis (ELA)', fontsize=14, fontweight='bold')
# Original image
ax1.imshow(image)
ax1.set_title('Original Image')
ax1.axis('off')
# ELA image
ela_image = convert_to_ela_image(image, 90)
ax2.imshow(ela_image)
ax2.set_title('ELA Result (Bright areas indicate potential editing)')
ax2.axis('off')
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format='png', dpi=150, bbox_inches='tight')
buffer.seek(0)
plt.close()
return buffer
def create_copy_move_analysis(image):
"""Create copy-move detection visualization"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle('Advanced Copy-Move Forgery Detection', fontsize=14, fontweight='bold')
# Original image
ax1.imshow(image)
ax1.set_title('Original Image')
ax1.axis('off')
# Copy-move detection
copy_move_matches = detect_copy_move_forgery_advanced(image)
ax2.imshow(image)
ax2.set_title(f'Copy-Move Detection ({len(copy_move_matches)} matches found)')
# Draw copy-move connections with confidence scores
colors = plt.cm.RdYlGn(np.linspace(0.3, 1, len(copy_move_matches)))
for i, (match, color) in enumerate(zip(copy_move_matches, colors)):
if len(match) == 3: # Advanced version with score
point1, point2, score = match
# Red dot for source
ax2.plot(point1[0], point1[1], 'o', color='red', markersize=5)
# Green dot for destination
ax2.plot(point2[0], point2[1], 'o', color='lime', markersize=5)
# Line with color based on confidence
ax2.plot([point1[0], point2[0]], [point1[1], point2[1]],
color=color, linewidth=2, alpha=0.8)
# Add confidence score as text
mid_x, mid_y = (point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2
ax2.text(mid_x, mid_y, f'{score:.2f}', fontsize=8,
bbox=dict(boxstyle="round,pad=0.1", facecolor='white', alpha=0.7))
else: # Simple version
point1, point2 = match
ax2.plot(point1[0], point1[1], 'ro', markersize=4)
ax2.plot(point2[0], point2[1], 'go', markersize=4)
ax2.plot([point1[0], point2[0]], [point1[1], point2[1]], 'g-', linewidth=1, alpha=0.7)
ax2.axis('off')
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format='png', dpi=150, bbox_inches='tight')
buffer.seek(0)
plt.close()
return buffer
def create_tampering_analysis(image):
"""Create tampering/splicing detection visualization"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle('Tampering/Splicing Detection', fontsize=14, fontweight='bold')
# Original image
ax1.imshow(image)
ax1.set_title('Original Image')
ax1.axis('off')
# Tampering detection
suspicious_regions = detect_splicing_regions(image)
ax2.imshow(image)
ax2.set_title(f'Suspicious Regions ({len(suspicious_regions)} found)')
# Highlight suspicious regions with different colors
colors = ['red', 'orange', 'yellow', 'purple', 'pink']
for i, region in enumerate(suspicious_regions):
x, y, w, h = region
color = colors[i % len(colors)]
rect = patches.Rectangle((x, y), w, h, linewidth=2,
edgecolor=color, facecolor='none', alpha=0.8)
ax2.add_patch(rect)
# Add region number
ax2.text(x, y-5, f'R{i+1}', fontsize=10, color=color, fontweight='bold')
ax2.axis('off')
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format='png', dpi=150, bbox_inches='tight')
buffer.seek(0)
plt.close()
return buffer
def create_noise_analysis(image):
"""Create noise pattern analysis visualization"""
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle('Noise Pattern Analysis', fontsize=14, fontweight='bold')
# Original image
ax1.imshow(image)
ax1.set_title('Original Image')
ax1.axis('off')
# Noise analysis
noise_map, noise = analyze_noise_patterns(image)
# Noise map
im1 = ax2.imshow(noise_map, cmap='hot', interpolation='nearest')
ax2.set_title('Noise Variance Map')
ax2.axis('off')
plt.colorbar(im1, ax=ax2, shrink=0.8)
# Raw noise
ax3.imshow(noise, cmap='gray')
ax3.set_title('Extracted Noise')
ax3.axis('off')
# Noise histogram
ax4.hist(noise.flatten(), bins=50, alpha=0.7, color='blue')
ax4.set_title('Noise Distribution')
ax4.set_xlabel('Noise Level')
ax4.set_ylabel('Frequency')
ax4.grid(True, alpha=0.3)
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format='png', dpi=150, bbox_inches='tight')
buffer.seek(0)
plt.close()
return buffer
# Deepfake Image Detection
def predict_deepfake_image(image_path, model):
    img = keras_image.load_img(image_path, target_size=(256, 256))
    img_array = keras_image.img_to_array(img) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    prediction = model.predict(img_array)
    # The model emits a single sigmoid score; index the scalar explicitly
    # rather than comparing a whole array against 0.5.
    return 'Real' if prediction[0][0] > 0.5 else 'Fake'
# Video Forgery Detection
# Configuration
target_height, target_width = 240, 320
threshold = 30 # Threshold for freeze/duplicate detection
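# `threshold` is compared against the count of changed pixels between
# consecutive frames: fewer than 30 nonzero pixels in the difference means
# the frame is nearly identical to the previous one, suggesting frame
# freezing or duplication.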
def predict_video_forgery_cnn(video_path, model):
"""CNN-based video forgery detection"""
vid = []
sumframes = 0
cap = cv2.VideoCapture(video_path)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
# Resize frame to target dimensions
frame = cv2.resize(frame, (target_width, target_height))
sumframes += 1
vid.append(frame)
cap.release()
if sumframes == 0:
return False, 0, 0
Xtest = np.array(vid)
output = model.predict(Xtest)
output = output.reshape((-1))
    # A frame counts as forged when its predicted score exceeds 0.5
    forged_frames = sum(1 for score in output if score > 0.5)
    is_forged = forged_frames > 0
return is_forged, forged_frames, sumframes
def analyze_video_tampering(video_path):
"""Frame difference analysis for tampering detection"""
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return False, [], []
prev_frame = None
frame_differences = []
suspected_frames = []
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if prev_frame is not None:
diff = cv2.absdiff(gray, prev_frame)
non_zero = np.count_nonzero(diff)
frame_differences.append(non_zero)
if non_zero < threshold:
current_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
suspected_frames.append(current_frame)
prev_frame = gray
cap.release()
# Simple rule: if any frame is suspected, flag as tampered
is_tampered = len(suspected_frames) > 0
return is_tampered, frame_differences, suspected_frames
def plot_frame_analysis(frame_differences):
    """Plot per-frame pixel differences against the detection threshold"""
    fig, ax = plt.subplots(figsize=(10, 4))
    ax.plot(frame_differences, color='blue', linewidth=1)
    ax.axhline(y=threshold, color='red', linestyle='--', label=f"Threshold ({threshold})")
    ax.set_xlabel("Frame Number")
    ax.set_ylabel("Pixel Differences")
    ax.set_title("Frame Difference Analysis")
    ax.legend()
    ax.grid(True, alpha=0.3)
    # Annotate with summary statistics
    if frame_differences:
        mean_val = np.mean(frame_differences)
        std_val = np.std(frame_differences)
        ax.text(0.02, 0.98, f"Mean: {mean_val:.1f}\nStd: {std_val:.1f}",
                transform=ax.transAxes, verticalalignment='top',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
    return fig
def combined_video_forgery_detection(video_path, model):
"""Combined detection using both CNN and frame analysis"""
# Method 1: CNN-based detection
cnn_forged, cnn_forged_frames, total_frames = predict_video_forgery_cnn(video_path, model)
# Method 2: Frame analysis tampering detection
frame_tampered, frame_differences, suspected_frames = analyze_video_tampering(video_path)
# Results
results = {
'cnn_forged': cnn_forged,
'cnn_forged_frames': cnn_forged_frames,
'frame_tampered': frame_tampered,
'suspected_frames': len(suspected_frames),
'total_frames': total_frames,
'frame_differences': frame_differences
}
# Simple decision logic
if cnn_forged and frame_tampered:
verdict = "FORGED - Detected by both CNN and Frame Analysis"
confidence = "High"
elif cnn_forged:
verdict = "FORGED - Detected by CNN"
confidence = "Medium"
elif frame_tampered:
verdict = "FORGED - Detected by Frame Analysis"
confidence = "Medium"
    else:
        verdict = "NOT TAMPERED - No forgery detected"
        confidence = "High"
return verdict, confidence, results
# Deepfake Video Detection
def build_feature_extractor():
feature_extractor = keras.applications.InceptionV3(
weights="imagenet",
include_top=False,
pooling="avg",
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
preprocess_input = keras.applications.inception_v3.preprocess_input
inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
preprocessed = preprocess_input(inputs)
outputs = feature_extractor(preprocessed)
return keras.Model(inputs, outputs, name="feature_extractor")
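# InceptionV3 with pooling="avg" emits one 2048-dimensional vector per frame
# (matching NUM_FEATURES); the sequence model consumes these vectors rather
# than raw pixels.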
feature_extractor = build_feature_extractor()
detector = MTCNN()
def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE), skip_frames=2):
    # max_frames=0 keeps every sampled frame; prepare_single_video later
    # truncates or masks the sequence to MAX_SEQ_LENGTH.
cap = cv.VideoCapture(path)
frames = []
frame_count = 0
previous_box = None
while True:
ret, frame = cap.read()
if not ret:
break
if frame_count % skip_frames == 0:
frame, previous_box = get_face_region_first_frame(frame, previous_box)
if frame is not None:
frame = cv.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
frame_count += 1
while len(frames) < max_frames and frames:
frames.append(frames[-1])
cap.release()
return np.array(frames)
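# Face detection runs only on the first usable frame: the detected bounding
# box is then reused for later frames, which assumes the subject stays
# roughly in place. Note that MTCNN expects RGB input while OpenCV frames are
# BGR, so detection accuracy may be somewhat reduced here.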
def get_face_region_first_frame(frame, previous_box=None):
if previous_box is None:
detections = detector.detect_faces(frame)
if detections:
x, y, width, height = detections[0]['box']
previous_box = (x, y, width, height)
else:
return None, None
    else:
        x, y, width, height = previous_box
    # MTCNN can return slightly negative box coordinates; clamp to the frame.
    x, y = max(0, x), max(0, y)
    face_region = frame[y:y+height, x:x+width]
return face_region, previous_box
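# frame_features holds one InceptionV3 vector per frame, zero-padded up to
# MAX_SEQ_LENGTH, while frame_mask marks which timesteps contain real frames
# so the sequence model can ignore the padding.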
def prepare_single_video(frames):
frames = frames[None, ...]
frame_mask = np.zeros(shape=(1, MAX_SEQ_LENGTH,), dtype="bool")
frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")
for i, batch in enumerate(frames):
video_length = batch.shape[0]
length = min(MAX_SEQ_LENGTH, video_length)
for j in range(length):
frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])
frame_mask[i, :length] = 1
return frame_features, frame_mask
def sequence_prediction(video_path):
class_vocab = label_processor2.get_vocabulary()
frames = load_video(video_path)
if len(frames) == 0:
st.error("Could not process video. Please try another file.")
return None
frame_features, frame_mask = prepare_single_video(frames)
probabilities = deepfake_model.predict([frame_features, frame_mask])[0]
predictions = {class_vocab[i]: probabilities[i] * 100 for i in np.argsort(probabilities)[::-1]}
return predictions
# Streamlit App
st.title("Fraudulent Image and Video Detection System")
# Sidebar for model selection
task = st.sidebar.selectbox("Choose a detection task:", [
"Image Forgery Detection",
"Deepfake Image Detection",
"Video Forgery Detection",
"Deepfake Video Detection"
])
# Main Streamlit App
if task == "Image Forgery Detection":
uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
if uploaded_file:
image = Image.open(uploaded_file)
st.image(image, caption="Uploaded Image", use_container_width=True)
# Original prediction
prepared_image = prepare_image_for_forgery(image).reshape(-1, 128, 128, 3)
model = load_image_forgery_model()
        prediction = model.predict(prepared_image)
        # Two-class output: index 0 = forged, index 1 = authentic.
        confidence_real = prediction[0][1] * 100
        confidence_fake = prediction[0][0] * 100
if confidence_real > confidence_fake:
st.success(f"Result: Real Image with {confidence_real:.2f}% confidence")
else:
st.error(f"Result: Forged Image with {confidence_fake:.2f}% confidence")
# Add analysis options
st.markdown("---")
        st.subheader("🔍 Detailed Forgery Analysis")
# Analysis type selection
analysis_type = st.selectbox(
"Choose Analysis Type:",
["Error Level Analysis (ELA)", "Copy-Move Detection", "Tampering Detection", "Noise Analysis"],
index=0
)
col1, col2 = st.columns([1, 3])
with col1:
analyze_button = st.button("Run Analysis", type="primary", use_container_width=True)
with col2:
st.markdown("### Analysis Guide:")
if analysis_type == "Error Level Analysis (ELA)":
st.info("**ELA**: Reveals compression artifacts. Bright areas indicate potential editing or manipulation.")
elif analysis_type == "Copy-Move Detection":
st.info("**Copy-Move**: Finds duplicated regions. Red dots show source, green dots show destination, lines show confidence.")
elif analysis_type == "Tampering Detection":
st.info("**Tampering**: Detects edge inconsistencies. Colored rectangles highlight suspicious regions.")
elif analysis_type == "Noise Analysis":
st.info("**Noise**: Analyzes noise patterns. Hot spots in variance map indicate inconsistent noise.")
if analyze_button:
with st.spinner(f"Running {analysis_type}..."):
try:
if analysis_type == "Error Level Analysis (ELA)":
analysis_buffer = create_ela_analysis(image)
filename = "ela_analysis.png"
elif analysis_type == "Copy-Move Detection":
analysis_buffer = create_copy_move_analysis(image)
filename = "copy_move_analysis.png"
elif analysis_type == "Tampering Detection":
analysis_buffer = create_tampering_analysis(image)
filename = "tampering_analysis.png"
elif analysis_type == "Noise Analysis":
analysis_buffer = create_noise_analysis(image)
filename = "noise_analysis.png"
st.image(analysis_buffer, caption=f"{analysis_type} Results", use_container_width=True)
# Detailed results based on analysis type
if analysis_type == "Copy-Move Detection":
matches = detect_copy_move_forgery_advanced(image)
if matches:
st.success(f"Found {len(matches)} potential copy-move regions")
with st.expander("Detailed Match Information"):
for i, match in enumerate(matches[:5]): # Show top 5
if len(match) == 3:
pos1, pos2, score = match
st.write(f"**Match {i+1}**: Confidence {score:.3f}")
st.write(f" Source: ({pos1[0]}, {pos1[1]}) β†’ Destination: ({pos2[0]}, {pos2[1]})")
else:
st.success("No copy-move forgery detected")
elif analysis_type == "Tampering Detection":
regions = detect_splicing_regions(image)
if regions:
st.warning(f"Found {len(regions)} suspicious regions")
with st.expander("Region Details"):
for i, (x, y, w, h) in enumerate(regions):
st.write(f"**Region {i+1}**: Position ({x}, {y}), Size {w}Γ—{h}")
else:
st.success("No suspicious tampering regions detected")
elif analysis_type == "Noise Analysis":
noise_map, _ = analyze_noise_patterns(image)
avg_noise = np.mean(noise_map)
std_noise = np.std(noise_map)
st.info(f"Average noise variance: {avg_noise:.3f}")
st.info(f"Noise variance std: {std_noise:.3f}")
if std_noise > avg_noise * 0.5:
st.warning("High noise variance detected - possible inconsistent editing")
else:
st.success("Noise patterns appear consistent")
# Download button
st.download_button(
label=f"Download {analysis_type} Results",
data=analysis_buffer.getvalue(),
file_name=filename,
mime="image/png",
use_container_width=True
)
except Exception as e:
st.error(f"Error during {analysis_type}: {str(e)}")
st.info("Some analysis methods may not work with all image types. Try with a different image if needed.")
elif task == "Deepfake Image Detection":
uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
if uploaded_file:
with open("temp_image.jpg", "wb") as f:
f.write(uploaded_file.getbuffer())
st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)
model = load_deepfake_image_model()
result = predict_deepfake_image("temp_image.jpg", model)
if result == 'Real':
st.success("Prediction: Real")
else:
st.error("Prediction: Fake")
os.remove("temp_image.jpg")
if task == "Video Forgery Detection":
uploaded_file = st.file_uploader("Upload a video", type=['mp4', 'avi', 'mov', 'mkv'])
if uploaded_file:
# Save uploaded file
with open("temp_video.mp4", "wb") as f:
f.write(uploaded_file.getbuffer())
st.video("temp_video.mp4")
st.write("Analyzing the video for forgery...")
# Load model and run combined detection
model = load_video_forgery_model()
verdict, confidence, results = combined_video_forgery_detection("temp_video.mp4", model)
# Display results
if "FORGED" in verdict:
st.error(f"🚨 {verdict}")
else:
st.success(f"βœ… {verdict}")
st.write(f"**Confidence Level:** {confidence}")
# Show detailed results
col1, col2 = st.columns(2)
with col1:
st.write("**CNN Analysis:**")
            if results['cnn_forged']:
                st.write("- Status: Forged ❌")
                st.write(f"- Forged Frames: {results['cnn_forged_frames']}/{results['total_frames']}")
            else:
                st.write("- Status: Not Forged ✅")
with col2:
st.write("**Frame Analysis:**")
            if results['frame_tampered']:
                st.write("- Status: Tampered ❌")
                st.write(f"- Suspected Frames: {results['suspected_frames']}")
            else:
                st.write("- Status: Not Tampered ✅")
# Plot frame differences if available
if results['frame_differences']:
st.write("**Frame Difference Analysis:**")
            fig = plot_frame_analysis(results['frame_differences'])
            st.pyplot(fig)
            plt.close(fig)
# Cleanup
os.remove("temp_video.mp4")
elif task == "Deepfake Video Detection":
uploaded_file = st.file_uploader("Upload a video", type=["mp4", "avi", "mov"])
if uploaded_file is not None:
with open("temp_video.mp4", "wb") as f:
f.write(uploaded_file.read())
st.video("temp_video.mp4")
st.write("Analyzing the video...")
        # Reuse the shared sequence_prediction helper rather than duplicating
        # the feature-extraction pipeline inline.
        predictions = sequence_prediction("temp_video.mp4")
        if predictions:
            highest_label = max(predictions, key=predictions.get)
            highest_prob = predictions[highest_label]
            if highest_label.lower() == "real":
                st.success(f"The video is real with a confidence of {highest_prob:.2f}%.")
            elif highest_label.lower() == "fake":
                st.error(f"This video is a deepfake with a confidence of {highest_prob:.2f}%.")
            else:
                st.warning(f"Uncertain prediction: {highest_label} with {highest_prob:.2f}% confidence.")