Spaces:
Sleeping
Sleeping
Upload 11 files
Browse files- README.md +34 -13
- combined_visualization.py +221 -0
- compare_images.py +96 -0
- comparison_interface.py +410 -0
- deepfake_detector.py +235 -0
- demo_combined_visualization.py +109 -0
- image_processor.py +162 -0
- labeling.py +146 -0
- main.py +167 -0
- requirements.txt +20 -0
- streamlit_app.py +188 -0
README.md
CHANGED
|
@@ -1,13 +1,34 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Deepfake Detection System with Heatmap Visualization

This project implements a deepfake detection system that processes images through multiple stages:

1. **Verification Module**: Uses an Nvidia AI model for image comparison
   - Calculates the SMI (Structural Matching Index)
   - Generates difference images
   - Applies threshold visualization
   - Adds bounding boxes around detected areas

2. **Labeling System**: Labels detected areas based on threat level

3. **Heatmap Visualization**: Creates heatmaps highlighting high-threat areas

## Project Structure

- `deepfake_detector.py`: Core detection functionality using the Nvidia AI model
- `image_processor.py`: Image processing utilities
- `labeling.py`: Threat labeling system
- `heatmap_generator.py`: Heatmap visualization tools
- `main.py`: Main application entry point
- `requirements.txt`: Project dependencies

## Setup and Installation

```bash
pip install -r requirements.txt
```

## Usage

```bash
python main.py --input_dir /path/to/images --output_dir /path/to/output
```
|
combined_visualization.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from image_processor import ImageProcessor
|
| 5 |
+
from heatmap_generator import HeatmapGenerator
|
| 6 |
+
|
| 7 |
+
class CombinedVisualizer:
    """Build a single overlaid threat visualization.

    Overlays are applied in order on top of the original image: the raw
    difference image, the low-threat heatmap, the medium-threat heatmap,
    and finally threat-colored bounding boxes with text labels.
    """

    def __init__(self):
        """
        Initialize the combined visualizer for creating overlaid threat visualizations.
        """
        self.image_processor = ImageProcessor()
        self.heatmap_generator = HeatmapGenerator()

    def create_combined_visualization(self, image_pair_results, output_path, alpha_diff=0.4, alpha_low=0.3, alpha_medium=0.4, dpi=300):
        """
        Create a combined visualization that overlays the difference image, low and
        medium threat heatmaps, and bounding boxes on top of each other.

        Args:
            image_pair_results: Dictionary containing all processing results
                (expects 'original_image', 'difference_image', 'bounding_boxes';
                optionally 'multi_heatmaps', 'labeled_regions', 'threat_summary').
            output_path: Path to save the visualization.
            alpha_diff: Transparency for the difference-image overlay.
            alpha_low: Transparency for the low-threat heatmap overlay.
            alpha_medium: Transparency for the medium-threat heatmap overlay.
            dpi: Resolution for the saved figure.

        Returns:
            Path to the generated visualization (same as output_path).
        """
        # Extract required components from results
        original_image = image_pair_results['original_image']
        difference_image = image_pair_results['difference_image']
        bounding_boxes = image_pair_results['bounding_boxes']
        multi_heatmaps = image_pair_results.get('multi_heatmaps', {})
        # Keep only regions that carry both a bbox and a threat level.
        labeled_regions = [r for r in image_pair_results.get('labeled_regions', [])
                           if 'bbox' in r and 'threat_level' in r]

        # If labeled_regions were not provided, fall back to the raw bounding boxes.
        if not labeled_regions and 'threat_summary' in image_pair_results:
            for bbox in bounding_boxes:
                # Default to medium threat when no specific level is available.
                labeled_regions.append({
                    'bbox': bbox,
                    'threat_level': 'medium',
                    'difference_percentage': 50  # Default value
                })

        # Start with a copy of the original image so the input is not mutated.
        combined_image = original_image.copy()

        # 1. Overlay the difference image with transparency.
        # Colorize it first if it is single-channel (grayscale).
        if len(difference_image.shape) == 2 or difference_image.shape[2] == 1:
            diff_colored = cv2.applyColorMap(difference_image, cv2.COLORMAP_HOT)
            diff_colored = cv2.cvtColor(diff_colored, cv2.COLOR_BGR2RGB)
        else:
            diff_colored = difference_image

        combined_image = self.image_processor.overlay_images(combined_image, diff_colored, alpha_diff)

        # 2. Overlay low threat heatmap if available.
        if 'low' in multi_heatmaps:
            combined_image = self.image_processor.overlay_images(combined_image, multi_heatmaps['low'], alpha_low)

        # 3. Overlay medium threat heatmap if available.
        if 'medium' in multi_heatmaps:
            combined_image = self.image_processor.overlay_images(combined_image, multi_heatmaps['medium'], alpha_medium)

        # 4. Draw bounding boxes with per-threat-level colors.
        # NOTE(review): tuples are documented as BGR but are drawn onto an
        # image that is later shown with plt.imshow (RGB) — confirm channel
        # order against ImageProcessor.load_image before relying on colors.
        threat_colors = {
            'low': (0, 255, 0),       # Green
            'medium': (0, 165, 255),  # Orange
            'high': (0, 0, 255)       # Red
        }

        for region in labeled_regions:
            x, y, w, h = region['bbox']
            threat_level = region['threat_level']

            # Default to red for unknown threat levels.
            color = threat_colors.get(threat_level, (0, 0, 255))

            # Draw rectangle with the threat-level color.
            cv2.rectangle(combined_image, (x, y), (x + w, y + h), color, 2)

            # Label each box with its threat level (and percentage when known).
            if 'difference_percentage' in region:
                label_text = f"{threat_level.upper()}: {region['difference_percentage']:.1f}%"
            else:
                label_text = f"{threat_level.upper()}"

            cv2.putText(combined_image, label_text, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        # Save the combined visualization via matplotlib.
        plt.figure(figsize=(12, 8))
        plt.imshow(combined_image)
        plt.title('Combined Threat Visualization')
        plt.axis('off')
        plt.tight_layout()
        plt.savefig(output_path, dpi=dpi, bbox_inches='tight')
        plt.close()

        # Also save the raw image for potential further processing.
        # Only strip a trailing '.png': str.replace would corrupt paths that
        # contain '.png' elsewhere and silently no-op on other extensions.
        if output_path.endswith('.png'):
            raw_output_path = output_path[:-len('.png')] + '_raw.png'
        else:
            raw_output_path = output_path + '_raw.png'
        self.image_processor.save_image(combined_image, raw_output_path)

        return output_path

    def create_combined_visualization_from_files(self, original_path, difference_path,
                                                 low_heatmap_path, medium_heatmap_path,
                                                 bounding_boxes, output_path,
                                                 alpha_diff=0.4, alpha_low=0.3, alpha_medium=0.4):
        """
        Create a combined visualization from individual image files.

        Args:
            original_path: Path to the original image.
            difference_path: Path to the difference image.
            low_heatmap_path: Path to the low-threat heatmap (optional; may be falsy).
            medium_heatmap_path: Path to the medium-threat heatmap (optional; may be falsy).
            bounding_boxes: List of bounding boxes (x, y, w, h).
            output_path: Path to save the visualization.
            alpha_diff: Transparency for the difference-image overlay.
            alpha_low: Transparency for the low-threat heatmap overlay.
            alpha_medium: Transparency for the medium-threat heatmap overlay.

        Returns:
            Path to the generated visualization.
        """
        # Load the mandatory images.
        original_image = self.image_processor.load_image(original_path)
        difference_image = self.image_processor.load_image(difference_path)

        # Load heatmaps only when paths are provided.
        multi_heatmaps = {}
        if low_heatmap_path:
            multi_heatmaps['low'] = self.image_processor.load_image(low_heatmap_path)
        if medium_heatmap_path:
            multi_heatmaps['medium'] = self.image_processor.load_image(medium_heatmap_path)

        # Assemble a results dictionary shaped like the pipeline output.
        image_pair_results = {
            'original_image': original_image,
            'difference_image': difference_image,
            'bounding_boxes': bounding_boxes,
            'multi_heatmaps': multi_heatmaps
        }

        # Delegate to the main visualization method.
        return self.create_combined_visualization(
            image_pair_results, output_path, alpha_diff, alpha_low, alpha_medium
        )
|
| 173 |
+
|
| 174 |
+
# Example usage: run the full pipeline on a single image pair and save the
# combined visualization. The paths below are placeholders.
if __name__ == "__main__":
    import os
    from deepfake_detector import DeepfakeDetector
    from labeling import ThreatLabeler

    # Initialize components
    detector = DeepfakeDetector()
    labeler = ThreatLabeler()
    heatmap_gen = HeatmapGenerator()
    img_processor = ImageProcessor()
    visualizer = CombinedVisualizer()

    # Example paths (replace with real files before running)
    image1_path = "path/to/original.jpg"
    image2_path = "path/to/modified.jpg"
    output_dir = "path/to/output"

    # Ensure output directory exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Process images — detection step produces the difference image and
    # bounding boxes consumed below.
    results = detector.process_image_pair(image1_path, image2_path)

    # Label detected regions by threat level
    original_image = img_processor.load_image(image1_path)
    labeled_image, labeled_regions = labeler.label_regions(
        original_image, results['difference_image'], results['bounding_boxes']
    )

    # Generate multi-level heatmaps from the labeled regions
    multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

    # Prepare results for combined visualization
    image_pair_results = {
        'original_image': original_image,
        'difference_image': results['difference_image'],
        'bounding_boxes': results['bounding_boxes'],
        'multi_heatmaps': multi_heatmaps,
        'labeled_regions': labeled_regions
    }

    # Create combined visualization
    output_path = os.path.join(output_dir, "combined_visualization.png")
    visualizer.create_combined_visualization(image_pair_results, output_path)

    print(f"Combined visualization saved to: {output_path}")
|
compare_images.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
from deepfake_detector import DeepfakeDetector
|
| 4 |
+
from image_processor import ImageProcessor
|
| 5 |
+
from labeling import ThreatLabeler
|
| 6 |
+
from heatmap_generator import HeatmapGenerator
|
| 7 |
+
from comparison_interface import ComparisonInterface
|
| 8 |
+
|
| 9 |
+
def ensure_dir(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` so there is no race between the existence check
    and the creation (the original check-then-create could raise if another
    process created the directory in between).
    """
    os.makedirs(directory, exist_ok=True)
|
| 12 |
+
|
| 13 |
+
def compare_images(image1_path, image2_path, output_dir, threshold=30, min_area=100):
    """Run the full detection pipeline on one image pair and save a comparison grid.

    Args:
        image1_path: Path to the original (reference) image.
        image2_path: Path to the possibly modified image.
        output_dir: Directory where the comparison visualization is written.
        threshold: Pixel-difference threshold forwarded to the detector.
        min_area: Minimum region area forwarded to the detector.

    Returns:
        Path to the saved comparison-grid image.
    """
    # Initialize components
    detector = DeepfakeDetector()
    labeler = ThreatLabeler()
    heatmap_gen = HeatmapGenerator()
    img_processor = ImageProcessor()
    comparison = ComparisonInterface()

    # Create output directory
    ensure_dir(output_dir)

    # Base filename (without extension) used to name all outputs
    base_name = os.path.splitext(os.path.basename(image1_path))[0]

    print(f"Processing images: {image1_path} and {image2_path}")

    # Step 1: Verification Module - process the image pair
    results = detector.process_image_pair(image1_path, image2_path, threshold, min_area)

    # Step 2: Labeling System - label detected regions by threat level
    original_image = img_processor.load_image(image1_path)
    modified_image = img_processor.load_image(image2_path)
    labeled_image, labeled_regions = labeler.label_regions(
        original_image, results['difference_image'], results['bounding_boxes'])

    # Aggregate per-region labels into a summary (counts, max threat, average)
    threat_summary = labeler.get_threat_summary(labeled_regions)

    # Step 3: Heatmap Visualization
    # Standard (combined) heatmap
    heatmap = heatmap_gen.generate_threat_heatmap(original_image, labeled_regions)

    # Separate low/medium/high heatmaps
    multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

    # Prepare results for the comparison interface; keys here must match
    # what ComparisonInterface.create_comparison_grid extracts.
    image_pair_results = {
        'original_image': original_image,
        'modified_image': modified_image,
        'difference_image': results['difference_image'],
        'threshold_image': results['threshold_image'],
        'annotated_image': results['annotated_image'],
        'labeled_image': labeled_image,
        'heatmap_overlay': heatmap,
        'multi_heatmaps': multi_heatmaps,
        'threat_summary': threat_summary,
        'smi_score': results['smi_score'],
        'bounding_boxes': results['bounding_boxes']
    }

    # Create the comprehensive grid visualization
    output_path = os.path.join(output_dir, f"{base_name}_comparison.png")
    comparison.create_comparison_grid(image_pair_results, output_path)

    # Print summary information to the console
    print(f"\nAnalysis Results:")
    print(f"SMI Score: {results['smi_score']:.4f} (1.0 = identical, 0.0 = completely different)")
    print(f"Total regions detected: {threat_summary['total_regions']}")
    print(f"Threat counts: Low={threat_summary['threat_counts']['low']}, "
          f"Medium={threat_summary['threat_counts']['medium']}, "
          f"High={threat_summary['threat_counts']['high']}")
    if threat_summary['max_threat']:
        print(f"Maximum threat: {threat_summary['max_threat']['level'].upper()} "
              f"({threat_summary['max_threat']['percentage']:.1f}%)")
    print(f"Average difference: {threat_summary['average_difference']:.1f}%")

    print(f"\nComparison visualization saved to: {output_path}")
    return output_path
|
| 81 |
+
|
| 82 |
+
def main():
    """CLI entry point: parse argv and run the comparison pipeline."""
    args = sys.argv[1:]
    if len(args) < 2:
        print("Usage: python compare_images.py <image1_path> <image2_path> [output_dir]")
        sys.exit(1)

    first_image = args[0]
    second_image = args[1]

    # Fall back to a default output directory when none is given.
    if len(args) > 2:
        destination = args[2]
    else:
        destination = "./comparison_output"

    compare_images(first_image, second_image, destination)

if __name__ == "__main__":
    main()
|
comparison_interface.py
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
from matplotlib.gridspec import GridSpec
|
| 6 |
+
from deepfake_detector import DeepfakeDetector
|
| 7 |
+
from image_processor import ImageProcessor
|
| 8 |
+
from labeling import ThreatLabeler
|
| 9 |
+
from heatmap_generator import HeatmapGenerator
|
| 10 |
+
|
| 11 |
+
class ComparisonInterface:
|
| 12 |
+
    def __init__(self):
        """
        Initialize the comparison interface for visualizing all processing stages.
        """
        # Shared helper used by the visualization methods to load/save images.
        self.img_processor = ImageProcessor()
|
| 17 |
+
|
| 18 |
+
    def create_comparison_grid(self, image_pair_results, output_path, figsize=(18, 12), dpi=300):
        """
        Create a comprehensive grid visualization of all processing stages.

        Layout is a 3x4 GridSpec: row 1 shows the input pair plus difference and
        threshold images; row 2 shows detected and threat-labeled regions; row 3
        shows the per-level heatmaps (or one combined heatmap) plus a text summary.

        Args:
            image_pair_results: Dictionary containing all processing results
                (see compare_images for the expected keys).
            output_path: Path to save the visualization.
            figsize: Figure size (width, height) in inches.
            dpi: Resolution for the saved image.

        Returns:
            Path to the saved visualization (same as output_path).
        """
        # Extract images from results
        original_image = image_pair_results['original_image']
        modified_image = image_pair_results['modified_image']
        difference_image = image_pair_results['difference_image']
        threshold_image = image_pair_results['threshold_image']
        annotated_image = image_pair_results['annotated_image']
        labeled_image = image_pair_results['labeled_image']
        heatmap_overlay = image_pair_results['heatmap_overlay']

        # Multi-level heatmaps are optional
        multi_heatmaps = image_pair_results.get('multi_heatmaps', {})

        # Create figure with grid layout
        fig = plt.figure(figsize=figsize)
        gs = GridSpec(3, 4, figure=fig)

        # Row 1: original images, difference, and threshold
        ax1 = fig.add_subplot(gs[0, 0])
        ax1.imshow(original_image)
        ax1.set_title('Original Image')
        ax1.axis('off')

        ax2 = fig.add_subplot(gs[0, 1])
        ax2.imshow(modified_image)
        ax2.set_title('Modified Image')
        ax2.axis('off')

        ax3 = fig.add_subplot(gs[0, 2])
        ax3.imshow(difference_image, cmap='gray')
        ax3.set_title('Difference Image')
        ax3.axis('off')

        ax4 = fig.add_subplot(gs[0, 3])
        ax4.imshow(threshold_image, cmap='gray')
        ax4.set_title('Thresholded Difference')
        ax4.axis('off')

        # Row 2: annotated and threat-labeled images (each spans two columns)
        ax5 = fig.add_subplot(gs[1, 0:2])
        ax5.imshow(annotated_image)
        ax5.set_title('Detected Regions')
        ax5.axis('off')

        ax6 = fig.add_subplot(gs[1, 2:4])
        ax6.imshow(labeled_image)
        ax6.set_title('Threat Labeled Regions')
        ax6.axis('off')

        # Row 3: multi-level heatmaps when all three levels are present,
        # otherwise the single combined heatmap in a wider panel.
        if 'low' in multi_heatmaps and 'medium' in multi_heatmaps and 'high' in multi_heatmaps:
            ax7 = fig.add_subplot(gs[2, 0])
            ax7.imshow(multi_heatmaps['low'])
            ax7.set_title('Low Threat Heatmap')
            ax7.axis('off')

            ax8 = fig.add_subplot(gs[2, 1])
            ax8.imshow(multi_heatmaps['medium'])
            ax8.set_title('Medium Threat Heatmap')
            ax8.axis('off')

            ax9 = fig.add_subplot(gs[2, 2])
            ax9.imshow(multi_heatmaps['high'])
            ax9.set_title('High Threat Heatmap')
            ax9.axis('off')
        else:
            # If multi-level heatmaps not available, show combined heatmap in larger space
            ax7 = fig.add_subplot(gs[2, 0:3])
            ax7.imshow(heatmap_overlay)
            ax7.set_title('Combined Threat Heatmap')
            ax7.axis('off')

        # Last cell: threat summary as a text box
        ax10 = fig.add_subplot(gs[2, 3])
        ax10.axis('off')
        summary_text = self._format_summary_text(image_pair_results['threat_summary'], image_pair_results['smi_score'])
        ax10.text(0, 0.5, summary_text, fontsize=10, va='center', ha='left', wrap=True)
        ax10.set_title('Threat Summary')

        # Add overall title
        plt.suptitle(f"Deepfake Detection Analysis", fontsize=16)

        # Adjust layout (leaving room for the suptitle) and save
        plt.tight_layout(rect=[0, 0, 1, 0.97])
        plt.savefig(output_path, dpi=dpi, bbox_inches='tight')
        plt.close()

        return output_path
|
| 115 |
+
|
| 116 |
+
def _format_summary_text(self, threat_summary, smi_score):
|
| 117 |
+
"""
|
| 118 |
+
Format threat summary as text for display
|
| 119 |
+
"""
|
| 120 |
+
text = f"SMI Score: {smi_score:.4f}\n"
|
| 121 |
+
text += f"(1.0 = identical, 0.0 = different)\n\n"
|
| 122 |
+
text += f"Total regions: {threat_summary['total_regions']}\n\n"
|
| 123 |
+
text += f"Threat counts:\n"
|
| 124 |
+
text += f" Low: {threat_summary['threat_counts']['low']}\n"
|
| 125 |
+
text += f" Medium: {threat_summary['threat_counts']['medium']}\n"
|
| 126 |
+
text += f" High: {threat_summary['threat_counts']['high']}\n\n"
|
| 127 |
+
|
| 128 |
+
if threat_summary['max_threat']:
|
| 129 |
+
text += f"Maximum threat: {threat_summary['max_threat']['level'].upper()}\n"
|
| 130 |
+
text += f" ({threat_summary['max_threat']['percentage']:.1f}%)\n\n"
|
| 131 |
+
|
| 132 |
+
text += f"Average difference: {threat_summary['average_difference']:.1f}%"
|
| 133 |
+
|
| 134 |
+
return text
|
| 135 |
+
|
| 136 |
+
def create_interactive_comparison(self, image_pair_results, output_path):
|
| 137 |
+
"""
|
| 138 |
+
Create an HTML file with interactive comparison of all processing stages
|
| 139 |
+
|
| 140 |
+
Args:
|
| 141 |
+
image_pair_results: Dictionary containing all processing results
|
| 142 |
+
output_path: Path to save the HTML file
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
Path to the generated HTML file
|
| 146 |
+
"""
|
| 147 |
+
# Create output directory for individual images
|
| 148 |
+
output_dir = os.path.dirname(output_path)
|
| 149 |
+
images_dir = os.path.join(output_dir, 'images')
|
| 150 |
+
if not os.path.exists(images_dir):
|
| 151 |
+
os.makedirs(images_dir)
|
| 152 |
+
|
| 153 |
+
# Get base filename for outputs
|
| 154 |
+
base_name = os.path.basename(output_path).split('.')[0]
|
| 155 |
+
|
| 156 |
+
# Save individual images for HTML display
|
| 157 |
+
image_paths = {}
|
| 158 |
+
|
| 159 |
+
# Save original and modified images
|
| 160 |
+
original_path = os.path.join(images_dir, f"{base_name}_original.png")
|
| 161 |
+
modified_path = os.path.join(images_dir, f"{base_name}_modified.png")
|
| 162 |
+
self.img_processor.save_image(image_pair_results['original_image'], original_path)
|
| 163 |
+
self.img_processor.save_image(image_pair_results['modified_image'], modified_path)
|
| 164 |
+
image_paths['original_image_path'] = os.path.relpath(original_path, output_dir)
|
| 165 |
+
image_paths['modified_image_path'] = os.path.relpath(modified_path, output_dir)
|
| 166 |
+
|
| 167 |
+
# Save difference and threshold images
|
| 168 |
+
difference_path = os.path.join(images_dir, f"{base_name}_difference.png")
|
| 169 |
+
threshold_path = os.path.join(images_dir, f"{base_name}_threshold.png")
|
| 170 |
+
self.img_processor.save_image(image_pair_results['difference_image'], difference_path)
|
| 171 |
+
self.img_processor.save_image(image_pair_results['threshold_image'], threshold_path)
|
| 172 |
+
image_paths['difference_image_path'] = os.path.relpath(difference_path, output_dir)
|
| 173 |
+
image_paths['threshold_image_path'] = os.path.relpath(threshold_path, output_dir)
|
| 174 |
+
|
| 175 |
+
# Save annotated and labeled images
|
| 176 |
+
annotated_path = os.path.join(images_dir, f"{base_name}_annotated.png")
|
| 177 |
+
labeled_path = os.path.join(images_dir, f"{base_name}_labeled.png")
|
| 178 |
+
self.img_processor.save_image(image_pair_results['annotated_image'], annotated_path)
|
| 179 |
+
self.img_processor.save_image(image_pair_results['labeled_image'], labeled_path)
|
| 180 |
+
image_paths['annotated_image_path'] = os.path.relpath(annotated_path, output_dir)
|
| 181 |
+
image_paths['labeled_image_path'] = os.path.relpath(labeled_path, output_dir)
|
| 182 |
+
|
| 183 |
+
# Save heatmap overlay
|
| 184 |
+
heatmap_path = os.path.join(images_dir, f"{base_name}_heatmap.png")
|
| 185 |
+
self.img_processor.save_image(image_pair_results['heatmap_overlay'], heatmap_path)
|
| 186 |
+
image_paths['heatmap_overlay_path'] = os.path.relpath(heatmap_path, output_dir)
|
| 187 |
+
|
| 188 |
+
# Save multi-level heatmaps if available
|
| 189 |
+
multi_heatmaps = image_pair_results.get('multi_heatmaps', {})
|
| 190 |
+
if 'low' in multi_heatmaps and 'medium' in multi_heatmaps and 'high' in multi_heatmaps:
|
| 191 |
+
low_path = os.path.join(images_dir, f"{base_name}_heatmap_low.png")
|
| 192 |
+
medium_path = os.path.join(images_dir, f"{base_name}_heatmap_medium.png")
|
| 193 |
+
high_path = os.path.join(images_dir, f"{base_name}_heatmap_high.png")
|
| 194 |
+
|
| 195 |
+
self.img_processor.save_image(multi_heatmaps['low'], low_path)
|
| 196 |
+
self.img_processor.save_image(multi_heatmaps['medium'], medium_path)
|
| 197 |
+
self.img_processor.save_image(multi_heatmaps['high'], high_path)
|
| 198 |
+
|
| 199 |
+
image_paths['low_heatmap_path'] = os.path.relpath(low_path, output_dir)
|
| 200 |
+
image_paths['medium_heatmap_path'] = os.path.relpath(medium_path, output_dir)
|
| 201 |
+
image_paths['high_heatmap_path'] = os.path.relpath(high_path, output_dir)
|
| 202 |
+
|
| 203 |
+
# Format threat summary for HTML display
|
| 204 |
+
threat_summary_text = self._format_summary_text(
|
| 205 |
+
image_pair_results['threat_summary'],
|
| 206 |
+
image_pair_results['smi_score']
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
# Read HTML template
|
| 210 |
+
template_path = os.path.join(os.path.dirname(__file__), 'templates', 'interactive_comparison.html')
|
| 211 |
+
with open(template_path, 'r') as f:
|
| 212 |
+
html_template = f.read()
|
| 213 |
+
|
| 214 |
+
# Replace placeholders with actual values
|
| 215 |
+
for key, value in image_paths.items():
|
| 216 |
+
html_template = html_template.replace(f"{{{{{key}}}}}", value)
|
| 217 |
+
|
| 218 |
+
# Replace threat summary
|
| 219 |
+
html_template = html_template.replace("{{threat_summary}}", threat_summary_text)
|
| 220 |
+
|
| 221 |
+
# Write HTML file
|
| 222 |
+
with open(output_path, 'w') as f:
|
| 223 |
+
f.write(html_template)
|
| 224 |
+
|
| 225 |
+
print(f"Interactive comparison saved to: {output_path}")
|
| 226 |
+
return output_path
|
| 227 |
+
|
| 228 |
+
def process_and_visualize(self, image1_path, image2_path, output_dir, model_path=None, threshold=30, min_area=100):
    """
    Process an image pair through the full pipeline (verification ->
    labeling -> heatmaps) and create comprehensive visualizations.

    Args:
        image1_path: Path to first (original) image
        image2_path: Path to second (modified/suspect) image
        output_dir: Directory to save outputs (created if missing)
        model_path: Path to AI model (optional)
        threshold: Threshold for difference detection (0-255)
        min_area: Minimum area for region detection

    Returns:
        Path to the generated interactive HTML comparison (a static PNG
        comparison grid is also written alongside it)
    """
    # Initialize pipeline components; the detector optionally wraps an AI model.
    detector = DeepfakeDetector(model_path)
    labeler = ThreatLabeler()
    heatmap_gen = HeatmapGenerator()

    # Create output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Output files are named after the first image's basename (without extension).
    base_name = os.path.splitext(os.path.basename(image1_path))[0]

    # Step 1: Verification Module - Process the image pair
    print(f"Processing images: {image1_path} and {image2_path}")
    detection_results = detector.process_image_pair(image1_path, image2_path, threshold, min_area)

    # Step 2: Labeling System - Label detected regions by threat level
    original_image = self.img_processor.load_image(image1_path)
    modified_image = self.img_processor.load_image(image2_path)
    labeled_image, labeled_regions = labeler.label_regions(
        original_image, detection_results['difference_image'], detection_results['bounding_boxes'])

    # Get threat summary
    threat_summary = labeler.get_threat_summary(labeled_regions)

    # Step 3: Heatmap Visualization - Generate heatmaps for threat visualization
    heatmap_overlay = heatmap_gen.generate_threat_heatmap(original_image, labeled_regions)
    multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

    # Combine all intermediate results for the visualization builders below.
    all_results = {
        'original_image': original_image,
        'modified_image': modified_image,
        'difference_image': detection_results['difference_image'],
        'threshold_image': detection_results['threshold_image'],
        'annotated_image': detection_results['annotated_image'],
        'labeled_image': labeled_image,
        'heatmap_overlay': heatmap_overlay,
        'multi_heatmaps': multi_heatmaps,
        'threat_summary': threat_summary,
        'smi_score': detection_results['smi_score'],
        'bounding_boxes': detection_results['bounding_boxes']
    }

    # Create and save the static comparison-grid visualization (PNG).
    grid_output_path = os.path.join(output_dir, f"{base_name}_comparison.png")
    self.create_comparison_grid(all_results, grid_output_path)

    # Create interactive HTML comparison
    html_output_path = os.path.join(output_dir, f"{base_name}_interactive.html")
    self.create_interactive_comparison(all_results, html_output_path)

    print(f"Comparison visualization saved to: {grid_output_path}")
    print(f"Interactive HTML comparison saved to: {html_output_path}")
    return html_output_path  # Return the interactive HTML path as the primary output
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def batch_process_directory(input_dir, output_dir, model_path=None, threshold=30, min_area=100):
    """
    Process all image pairs in a directory and create comparison visualizations.

    Images named ``<name>_original.<ext>`` / ``<name>_modified.<ext>`` are
    matched by their shared ``<name>`` stem. When that naming pattern is not
    present, images are paired consecutively (1&2, 3&4, ...) in sorted
    filename order.

    Args:
        input_dir: Directory containing input images (.png/.jpg/.jpeg)
        output_dir: Directory to save outputs (created if missing)
        model_path: Path to AI model (optional)
        threshold: Threshold for difference detection (0-255)
        min_area: Minimum area for region detection

    Returns:
        List of paths to generated HTML comparison files (empty if no pairs)
    """
    # Ensure output directory exists (race-free, no pre-check needed).
    os.makedirs(output_dir, exist_ok=True)

    # Sort for deterministic pairing: os.listdir order is arbitrary.
    image_files = sorted(f for f in os.listdir(input_dir)
                         if f.lower().endswith(('.png', '.jpg', '.jpeg')))

    if not image_files:
        print(f"No image files found in {input_dir}")
        return []

    # Group images for comparison (pairs with _original and _modified suffixes).
    original_images = [f for f in image_files if '_original' in f]
    modified_images = [f for f in image_files if '_modified' in f]

    # If we don't have clear pairs, just process consecutive images.
    if not (original_images and modified_images):
        # Process images in pairs (1&2, 3&4, etc.)
        if len(image_files) < 2:
            print("Need at least 2 images to compare")
            return []

        image_pairs = [(image_files[i], image_files[i + 1])
                       for i in range(0, len(image_files) - 1, 2)]
        print(f"No _original/_modified naming pattern found. Processing {len(image_pairs)} consecutive pairs.")
    else:
        # Match on the filename stem (extension stripped) so that
        # 'photo_original.png' pairs with 'photo_modified.png'. The previous
        # substring test compared 'photo.png' against 'photo_modified.png'
        # and therefore never matched any pair.
        modified_by_stem = {
            os.path.splitext(mod)[0].replace('_modified', ''): mod
            for mod in modified_images
        }
        image_pairs = []
        for orig in original_images:
            stem = os.path.splitext(orig)[0].replace('_original', '')
            mod = modified_by_stem.get(stem)
            if mod is not None:
                image_pairs.append((orig, mod))
        print(f"Found {len(image_pairs)} original/modified image pairs.")

    if not image_pairs:
        print("No valid image pairs found to process")
        return []

    # Initialize comparison interface
    interface = ComparisonInterface()

    # Process each image pair
    html_paths = []
    for img1, img2 in image_pairs:
        img1_path = os.path.join(input_dir, img1)
        img2_path = os.path.join(input_dir, img2)

        print(f"\n{'='*50}")
        print(f"Processing pair: {img1} and {img2}")
        print(f"{'='*50}")

        # Process and create comparison visualization
        html_path = interface.process_and_visualize(
            img1_path, img2_path, output_dir,
            model_path, threshold, min_area
        )
        html_paths.append(html_path)

    print(f"\n{'='*50}")
    print(f"Overall Summary: Processed {len(image_pairs)} image pairs")
    print(f"{'='*50}")
    print(f"All comparison visualizations saved to: {output_dir}")

    return html_paths
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
if __name__ == "__main__":
    # CLI entry point: batch-process a directory of image pairs and emit
    # interactive HTML comparison reports.
    import argparse
    import webbrowser

    parser = argparse.ArgumentParser(description='Deepfake Detection Comparison Interface')
    parser.add_argument('--input_dir', type=str, required=True, help='Directory containing input images')
    parser.add_argument('--output_dir', type=str, required=True, help='Directory to save output visualizations')
    parser.add_argument('--model_path', type=str, help='Path to Nvidia AI model (optional)')
    parser.add_argument('--threshold', type=int, default=30, help='Threshold for difference detection (0-255)')
    parser.add_argument('--min_area', type=int, default=100, help='Minimum area for region detection')
    parser.add_argument('--open_browser', action='store_true', help='Automatically open HTML results in browser')

    args = parser.parse_args()

    # Process all images in the directory
    html_paths = batch_process_directory(
        args.input_dir, args.output_dir,
        args.model_path, args.threshold, args.min_area
    )

    # Open the first result in browser if requested
    if args.open_browser and html_paths:
        print(f"\nOpening first result in web browser: {html_paths[0]}")
        webbrowser.open('file://' + os.path.abspath(html_paths[0]))

    print("\nTo view interactive results, open the HTML files in your web browser.")
    # NOTE(review): the conditional below binds to the whole concatenation,
    # i.e. ("Example: ..." + path) if html_paths else "" — so an empty result
    # list prints a blank line rather than raising an IndexError.
    print("Example: file://" + os.path.abspath(html_paths[0]) if html_paths else "")
    print("\nDone!")
|
deepfake_detector.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision.transforms as transforms
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
class DeepfakeDetector:
    """Verification module for deepfake detection.

    Compares an original/suspect image pair and produces an SMI similarity
    score, a normalized difference image, a thresholded difference mask,
    and bounding boxes around regions of significant change.
    """

    def __init__(self, model_path=None):
        """
        Initialize the deepfake detector with Nvidia AI model

        Args:
            model_path: Path to the pre-trained Nvidia AI model (optional)
        """
        # Run on GPU when available; the (future) neural model would live here.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = self._load_model(model_path)
        # Standard ImageNet-style preprocessing for the neural model input.
        self.transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def _load_model(self, model_path):
        """
        Load the Nvidia AI model for deepfake detection.

        This is currently a placeholder: no model is actually deserialized,
        and all detection falls back to the classical image-differencing
        methods below.

        Args:
            model_path: Path to the pre-trained model

        Returns:
            Loaded model, or None when no model is available
        """
        if model_path:
            try:
                # Example placeholder for model loading
                # model = torch.load(model_path, map_location=self.device)
                print(f"Model loaded from {model_path}")
                return None  # Replace with actual model
            except Exception as e:
                print(f"Error loading model: {e}")
                return None
        else:
            print("No model path provided, using default detection methods")
            return None

    @staticmethod
    def _to_rgb(image):
        """
        Return the image as an RGB array, loading it from disk when a path
        string is given. Deduplicates the load-and-convert boilerplate that
        was repeated in every public method.

        Args:
            image: numpy array (returned as-is) or path to an image file

        Returns:
            RGB image as a numpy array

        Raises:
            ValueError: if a path is given but the file cannot be read
        """
        if isinstance(image, str):
            loaded = cv2.imread(image)
            if loaded is None:
                # cv2.imread returns None on failure instead of raising; fail
                # loudly here rather than crashing later in cvtColor with a
                # cryptic error (consistent with ImageProcessor.load_image).
                raise ValueError(f"Could not load image from {image}")
            return cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)
        return image

    @staticmethod
    def _match_size(image1, image2):
        """Return image2 resized to image1's dimensions when shapes differ."""
        if image1.shape != image2.shape:
            image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))
        return image2

    def calculate_smi(self, image1, image2):
        """
        Calculate Structural Matching Index between two images

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)

        Returns:
            SMI score (float between 0 and 1; 1 means identical)
        """
        image1 = self._to_rgb(image1)
        image2 = self._to_rgb(image2)
        image2 = self._match_size(image1, image2)

        # SSIM on grayscale is used as a placeholder for the model-based SMI.
        gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
        gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)

        from skimage.metrics import structural_similarity as ssim
        smi_score, _ = ssim(gray1, gray2, full=True)

        return smi_score

    def generate_difference_image(self, image1, image2):
        """
        Generate a difference image highlighting areas of discrepancy

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)

        Returns:
            Grayscale absolute-difference image stretched to 0-255
        """
        image1 = self._to_rgb(image1)
        image2 = self._to_rgb(image2)
        image2 = self._match_size(image1, image2)

        # Convert to grayscale before differencing.
        gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
        gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)

        # Absolute per-pixel difference, normalized for better visualization.
        diff = cv2.absdiff(gray1, gray2)
        diff_normalized = cv2.normalize(diff, None, 0, 255, cv2.NORM_MINMAX)

        return diff_normalized

    def apply_threshold(self, diff_image, threshold=30):
        """
        Apply threshold to difference image to highlight significant differences

        Args:
            diff_image: Difference image (numpy array)
            threshold: Threshold value (0-255)

        Returns:
            Binary thresholded image (numpy array)
        """
        _, thresh = cv2.threshold(diff_image, threshold, 255, cv2.THRESH_BINARY)
        return thresh

    def detect_bounding_boxes(self, thresh_image, min_area=100):
        """
        Detect bounding boxes around areas of significant difference

        Args:
            thresh_image: Thresholded binary image (numpy array)
            min_area: Minimum contour area (pixels) to consider

        Returns:
            List of bounding boxes as (x, y, w, h) tuples
        """
        # Find external contours of the white (changed) regions.
        contours, _ = cv2.findContours(thresh_image.astype(np.uint8),
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)

        # Keep only contours large enough to be meaningful regions.
        bounding_boxes = []
        for contour in contours:
            if cv2.contourArea(contour) >= min_area:
                x, y, w, h = cv2.boundingRect(contour)
                bounding_boxes.append((x, y, w, h))

        return bounding_boxes

    def draw_bounding_boxes(self, image, bounding_boxes, color=(0, 255, 0), thickness=2):
        """
        Draw bounding boxes on an image

        Args:
            image: Image to draw on (numpy array; not modified)
            bounding_boxes: List of bounding boxes (x, y, w, h)
            color: Box color tuple; the default (0, 255, 0) is green in both
                RGB and BGR interpretations
            thickness: Line thickness in pixels

        Returns:
            Copy of the image with bounding boxes drawn
        """
        # Copy to avoid modifying the caller's array.
        result = image.copy()

        for (x, y, w, h) in bounding_boxes:
            cv2.rectangle(result, (x, y), (x + w, y + h), color, thickness)

        return result

    def process_image_pair(self, image1, image2, threshold=30, min_area=100):
        """
        Process a pair of images through the complete verification pipeline

        Args:
            image1: First image (numpy array or path)
            image2: Second image (numpy array or path)
            threshold: Threshold value for difference detection (0-255)
            min_area: Minimum area for bounding box detection

        Returns:
            Dictionary containing:
            - smi_score: Structural Matching Index
            - difference_image: Difference visualization
            - threshold_image: Thresholded difference image
            - bounding_boxes: List of detected bounding boxes
            - annotated_image: Original image with bounding boxes
        """
        # Load each image once and reuse; the helpers below do not mutate
        # their inputs, and draw_bounding_boxes copies before drawing.
        img1 = self._to_rgb(image1)
        img2 = self._to_rgb(image2)

        # Calculate SMI
        smi_score = self.calculate_smi(img1, img2)

        # Generate difference image
        diff_image = self.generate_difference_image(img1, img2)

        # Apply threshold
        thresh_image = self.apply_threshold(diff_image, threshold)

        # Detect bounding boxes
        bounding_boxes = self.detect_bounding_boxes(thresh_image, min_area)

        # Draw bounding boxes on original image
        annotated_image = self.draw_bounding_boxes(img1, bounding_boxes)

        return {
            'smi_score': smi_score,
            'difference_image': diff_image,
            'threshold_image': thresh_image,
            'bounding_boxes': bounding_boxes,
            'annotated_image': annotated_image
        }
|
demo_combined_visualization.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
from deepfake_detector import DeepfakeDetector
|
| 6 |
+
from image_processor import ImageProcessor
|
| 7 |
+
from labeling import ThreatLabeler
|
| 8 |
+
from heatmap_generator import HeatmapGenerator
|
| 9 |
+
from combined_visualization import CombinedVisualizer
|
| 10 |
+
|
| 11 |
+
def ensure_dir(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so the operation is idempotent and race-free:
    the previous check-then-create pattern could raise FileExistsError if
    another process created the directory between the check and makedirs.

    Args:
        directory: Path of the directory to create
    """
    os.makedirs(directory, exist_ok=True)
|
| 15 |
+
|
| 16 |
+
def demo_combined_visualization(image1_path, image2_path, output_dir, threshold=30, min_area=100):
    """
    Demonstrate the combined visualization that overlays difference image,
    low and medium threat heatmaps, and bounding boxes.

    Args:
        image1_path: Path to original image
        image2_path: Path to modified image
        output_dir: Directory to save outputs (created if missing)
        threshold: Threshold for difference detection (0-255)
        min_area: Minimum area for region detection

    Returns:
        Path to the saved combined-overlay PNG
    """
    # Initialize pipeline components.
    detector = DeepfakeDetector()
    labeler = ThreatLabeler()
    heatmap_gen = HeatmapGenerator()
    img_processor = ImageProcessor()
    visualizer = CombinedVisualizer()

    # Create output directory
    ensure_dir(output_dir)

    # Output files are named after the original image's basename.
    base_name = os.path.splitext(os.path.basename(image1_path))[0]

    print(f"Processing images: {image1_path} and {image2_path}")

    # Step 1: Verification Module - Process the image pair
    results = detector.process_image_pair(image1_path, image2_path, threshold, min_area)

    # Step 2: Labeling System - Label detected regions by threat level
    original_image = img_processor.load_image(image1_path)
    modified_image = img_processor.load_image(image2_path)
    labeled_image, labeled_regions = labeler.label_regions(
        original_image, results['difference_image'], results['bounding_boxes'])

    # Get threat summary
    threat_summary = labeler.get_threat_summary(labeled_regions)

    # Step 3: Generate multi-level heatmaps
    multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

    # Bundle all intermediate results for the combined visualizer.
    image_pair_results = {
        'original_image': original_image,
        'modified_image': modified_image,
        'difference_image': results['difference_image'],
        'threshold_image': results['threshold_image'],
        'annotated_image': results['annotated_image'],
        'labeled_image': labeled_image,
        'multi_heatmaps': multi_heatmaps,
        'bounding_boxes': results['bounding_boxes'],
        'labeled_regions': labeled_regions,
        'threat_summary': threat_summary,
        'smi_score': results['smi_score']
    }

    # Create combined visualization
    output_path = os.path.join(output_dir, f"{base_name}_combined_overlay.png")
    visualizer.create_combined_visualization(
        image_pair_results,
        output_path,
        alpha_diff=0.4,  # Transparency for difference image
        alpha_low=0.3,  # Transparency for low threat heatmap
        alpha_medium=0.4  # Transparency for medium threat heatmap
    )

    # Print summary information
    print(f"\nAnalysis Results:")
    print(f"SMI Score: {results['smi_score']:.4f} (1.0 = identical, 0.0 = completely different)")
    print(f"Total regions detected: {threat_summary['total_regions']}")
    print(f"Threat counts: Low={threat_summary['threat_counts']['low']}, "
          f"Medium={threat_summary['threat_counts']['medium']}, "
          f"High={threat_summary['threat_counts']['high']}")
    if threat_summary['max_threat']:
        print(f"Maximum threat: {threat_summary['max_threat']['level'].upper()} "
              f"({threat_summary['max_threat']['percentage']:.1f}%)")
    print(f"Average difference: {threat_summary['average_difference']:.1f}%")

    print(f"\nCombined visualization saved to: {output_path}")
    return output_path
|
| 97 |
+
|
| 98 |
+
if __name__ == "__main__":
    import sys

    # Minimal CLI: positional original/modified image paths, with an
    # optional output directory (defaults to ./output).
    if len(sys.argv) < 3:
        print("Usage: python demo_combined_visualization.py <original_image> <modified_image> [output_dir]")
        sys.exit(1)

    image1_path = sys.argv[1]
    image2_path = sys.argv[2]
    output_dir = sys.argv[3] if len(sys.argv) > 3 else "./output"

    demo_combined_visualization(image1_path, image2_path, output_dir)
|
image_processor.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
|
| 6 |
+
class ImageProcessor:
    """Utility collection for image loading, saving, and manipulation.

    Color images are handled as RGB numpy arrays throughout; conversion
    to/from OpenCV's BGR convention happens only at the disk I/O boundary.
    """

    def __init__(self):
        """
        Initialize the image processor for handling image manipulation tasks
        """
        # Stateless: every operation is exposed as a static method.
        pass

    @staticmethod
    def load_image(image_path):
        """
        Load an image from a file path

        Args:
            image_path: Path to the image file

        Returns:
            Loaded image as numpy array in RGB format

        Raises:
            ValueError: if the file cannot be read as an image
        """
        img = cv2.imread(image_path)
        if img is None:
            raise ValueError(f"Could not load image from {image_path}")
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    @staticmethod
    def save_image(image, output_path):
        """
        Save an image to a file

        Args:
            image: Image as numpy array in RGB format (grayscale is written as-is)
            output_path: Path to save the image
        """
        # Convert from RGB to BGR for OpenCV; leave non-3-channel data untouched.
        if len(image.shape) == 3 and image.shape[2] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(output_path, image)

    @staticmethod
    def resize_image(image, width=None, height=None):
        """
        Resize an image while maintaining aspect ratio

        Args:
            image: Image as numpy array
            width: Target width (if None, calculated from height)
            height: Target height (if None, calculated from width)

        Returns:
            Resized image (the input unchanged when both are None)
        """
        if width is None and height is None:
            return image

        h, w = image.shape[:2]
        if width is None:
            # Derive width from the requested height to preserve aspect ratio.
            aspect = height / float(h)
            dim = (int(w * aspect), height)
        elif height is None:
            # Derive height from the requested width to preserve aspect ratio.
            aspect = width / float(w)
            dim = (width, int(h * aspect))
        else:
            # Both given: exact size requested, aspect ratio NOT preserved.
            dim = (width, height)

        return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)

    @staticmethod
    def normalize_image(image):
        """
        Normalize image values to the full 0-255 range

        Args:
            image: Input image

        Returns:
            Normalized image as uint8
        """
        return cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

    @staticmethod
    def apply_color_map(image, colormap=cv2.COLORMAP_JET):
        """
        Apply a colormap to a grayscale image

        Args:
            image: Grayscale image (3-channel input is converted to grayscale first)
            colormap: OpenCV colormap type

        Returns:
            Color-mapped image (BGR, per OpenCV's applyColorMap convention)
        """
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        return cv2.applyColorMap(image, colormap)

    @staticmethod
    def overlay_images(background, overlay, alpha=0.5):
        """
        Overlay one image on top of another with transparency

        Args:
            background: Background image
            overlay: Image to overlay (resized to match the background if needed)
            alpha: Transparency factor (0-1); higher shows more of the overlay

        Returns:
            Blended image
        """
        # Ensure images are the same size before blending.
        if background.shape != overlay.shape:
            overlay = cv2.resize(overlay, (background.shape[1], background.shape[0]))

        return cv2.addWeighted(background, 1 - alpha, overlay, alpha, 0)

    @staticmethod
    def crop_image(image, x, y, width, height):
        """
        Crop a region from an image

        Args:
            image: Input image
            x, y: Top-left corner coordinates
            width, height: Dimensions of the crop

        Returns:
            Cropped image (a numpy view; regions past the border are clipped
            by numpy slicing rather than raising)
        """
        return image[y:y+height, x:x+width]

    @staticmethod
    def display_images(images, titles=None, figsize=(15, 10)):
        """
        Display multiple images in a single-row grid

        Args:
            images: List of images to display
            titles: List of titles for each image (auto-generated if None)
            figsize: Figure size (width, height) in inches

        Returns:
            The created matplotlib Figure
        """
        n = len(images)
        if titles is None:
            titles = [f'Image {i+1}' for i in range(n)]

        fig, axes = plt.subplots(1, n, figsize=figsize)
        if n == 1:
            axes = [axes]  # make the single Axes object iterable below

        for i, (img, title) in enumerate(zip(images, titles)):
            # matplotlib's imshow cannot render an (H, W, 1) array; drop the
            # trailing channel axis so single-channel images display correctly
            # (previously such arrays were passed through and raised).
            if len(img.shape) == 3 and img.shape[2] == 1:
                img = img[:, :, 0]
            if len(img.shape) == 2:  # Grayscale
                axes[i].imshow(img, cmap='gray')
            else:  # Color
                axes[i].imshow(img)
            axes[i].set_title(title)
            axes[i].axis('off')

        plt.tight_layout()
        return fig
|
labeling.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
|
| 4 |
+
class ThreatLabeler:
    """Assigns threat levels to detected image regions and annotates them.

    Threat levels are derived from the average pixel difference (rescaled to
    0-100%) inside each detected bounding box.
    """

    def __init__(self):
        """Initialize the threat labeling system for deepfake detection."""
        # Threat bands as [min, max) percentage-difference intervals.
        self.threat_levels = {
            'low': (0, 30),       # Low threat: 0-30% difference
            'medium': (30, 60),   # Medium threat: 30-60% difference
            'high': (60, 100)     # High threat: 60-100% difference
        }

        # Colors for each threat level (BGR order, as used by OpenCV drawing).
        self.threat_colors = {
            'low': (0, 255, 0),       # Green
            'medium': (0, 165, 255),  # Orange
            'high': (0, 0, 255)       # Red
        }

    def calculate_threat_level(self, diff_value):
        """Map a normalized difference value to a threat level.

        Args:
            diff_value: Normalized difference value (0-100).

        Returns:
            Threat level string: 'low', 'medium', or 'high'.
        """
        for level, (min_val, max_val) in self.threat_levels.items():
            if min_val <= diff_value < max_val:
                return level
        # Exactly 100, or anything outside the configured bands, is treated
        # as maximum threat.
        return 'high'

    def calculate_region_threat(self, diff_image, bbox):
        """Calculate the threat level for a specific region.

        Args:
            diff_image: Difference image (grayscale, values 0-255).
            bbox: Bounding box as (x, y, w, h).

        Returns:
            Tuple of (threat level string, average difference percentage).
        """
        x, y, w, h = bbox
        region = diff_image[y:y+h, x:x+w]

        # Guard against degenerate or out-of-bounds boxes: np.mean of an
        # empty slice returns NaN (with a RuntimeWarning), and NaN fails all
        # band comparisons, so it was silently misclassified as 'high'.
        if region.size == 0:
            return 'low', 0.0

        # Average difference in the region, rescaled from 0-255 to 0-100.
        avg_diff = np.mean(region) / 255 * 100

        return self.calculate_threat_level(avg_diff), avg_diff

    def label_regions(self, image, diff_image, bounding_boxes):
        """Label regions with threat levels and colors.

        Args:
            image: Original image to annotate (not modified in place).
            diff_image: Difference image used to score each region.
            bounding_boxes: List of bounding boxes (x, y, w, h).

        Returns:
            Tuple of (annotated image copy, list of region dicts with keys
            'bbox', 'threat_level', 'difference_percentage').
        """
        # Work on a copy so the caller's image is left untouched.
        labeled_image = image.copy()
        labeled_regions = []

        for bbox in bounding_boxes:
            x, y, w, h = bbox

            # Score this region and pick the matching annotation color.
            threat_level, avg_diff = self.calculate_region_threat(diff_image, bbox)
            color = self.threat_colors[threat_level]

            # Draw colored rectangle based on threat level.
            cv2.rectangle(labeled_image, (x, y), (x + w, y + h), color, 2)

            # Add label text with threat level and percentage; clamp y so the
            # text stays on-canvas for boxes touching the top edge.
            label_text = f"{threat_level.upper()}: {avg_diff:.1f}%"
            text_y = max(y - 10, 10)
            cv2.putText(labeled_image, label_text, (x, text_y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            labeled_regions.append({
                'bbox': bbox,
                'threat_level': threat_level,
                'difference_percentage': avg_diff
            })

        return labeled_image, labeled_regions

    def get_threat_summary(self, labeled_regions):
        """Generate a summary of threat levels in the image.

        Args:
            labeled_regions: List of region dicts from label_regions.

        Returns:
            Dict with 'total_regions', per-level 'threat_counts',
            'max_threat' (or None when empty), and 'average_difference'.
        """
        if not labeled_regions:
            return {
                'total_regions': 0,
                'threat_counts': {'low': 0, 'medium': 0, 'high': 0},
                'max_threat': None,
                'average_difference': 0
            }

        # Count regions by threat level while tracking the worst offender.
        threat_counts = {'low': 0, 'medium': 0, 'high': 0}
        total_diff = 0
        max_threat = {'level': 'low', 'percentage': 0}

        for region in labeled_regions:
            level = region['threat_level']
            diff = region['difference_percentage']

            threat_counts[level] += 1
            total_diff += diff

            if diff > max_threat['percentage']:
                max_threat = {'level': level, 'percentage': diff}

        avg_diff = total_diff / len(labeled_regions) if labeled_regions else 0

        return {
            'total_regions': len(labeled_regions),
            'threat_counts': threat_counts,
            'max_threat': max_threat,
            'average_difference': avg_diff
        }
|
main.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
from deepfake_detector import DeepfakeDetector
|
| 7 |
+
from image_processor import ImageProcessor
|
| 8 |
+
from labeling import ThreatLabeler
|
| 9 |
+
from heatmap_generator import HeatmapGenerator
|
| 10 |
+
|
| 11 |
+
def parse_args():
    """Parse command-line options for the detection pipeline."""
    argument_parser = argparse.ArgumentParser(
        description='Deepfake Detection with Heatmap Visualization')
    argument_parser.add_argument('--input_dir', type=str, required=True,
                                 help='Directory containing input images')
    argument_parser.add_argument('--output_dir', type=str, required=True,
                                 help='Directory to save output visualizations')
    argument_parser.add_argument('--model_path', type=str,
                                 help='Path to Nvidia AI model (optional)')
    argument_parser.add_argument('--threshold', type=int, default=30,
                                 help='Threshold for difference detection (0-255)')
    argument_parser.add_argument('--min_area', type=int, default=100,
                                 help='Minimum area for region detection')
    return argument_parser.parse_args()
|
| 19 |
+
|
| 20 |
+
def ensure_dir(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses makedirs(exist_ok=True) instead of the previous check-then-create
    sequence, which had a TOCTOU race when two processes created the same
    directory concurrently.
    """
    os.makedirs(directory, exist_ok=True)
|
| 23 |
+
|
| 24 |
+
def process_image_pair(image1_path, image2_path, output_dir, model_path=None, threshold=30, min_area=100):
    """Run the full three-stage detection pipeline on one image pair.

    Stages: (1) verification — difference/threshold images and bounding boxes
    via DeepfakeDetector; (2) threat labeling of detected regions; (3) heatmap
    visualization. All artifacts are written under *output_dir* in per-stage
    subfolders, and progress/summary information is printed to stdout.

    Args:
        image1_path: Path to the original/reference image.
        image2_path: Path to the potentially modified image.
        output_dir: Root directory for generated outputs.
        model_path: Optional path to the Nvidia AI model (forwarded to
            DeepfakeDetector; semantics defined by that class).
        threshold: Pixel-difference threshold (0-255) for region detection.
        min_area: Minimum region area to report.

    Returns:
        Threat-summary dict produced by ThreatLabeler.get_threat_summary.
    """
    # Initialize components
    detector = DeepfakeDetector(model_path)
    labeler = ThreatLabeler()
    heatmap_gen = HeatmapGenerator()
    img_processor = ImageProcessor()

    # Create output subdirectories (one per pipeline stage)
    verification_dir = os.path.join(output_dir, 'verification')
    labeling_dir = os.path.join(output_dir, 'labeling')
    heatmap_dir = os.path.join(output_dir, 'heatmap')
    ensure_dir(verification_dir)
    ensure_dir(labeling_dir)
    ensure_dir(heatmap_dir)

    # Get base filename for outputs (first image's name without extension)
    base_name = os.path.splitext(os.path.basename(image1_path))[0]

    # Step 1: Verification Module - Process the image pair
    print(f"Processing images: {image1_path} and {image2_path}")
    results = detector.process_image_pair(image1_path, image2_path, threshold, min_area)

    # Save verification results
    img_processor.save_image(results['difference_image'],
                            os.path.join(verification_dir, f"{base_name}_diff.png"))
    img_processor.save_image(results['threshold_image'],
                            os.path.join(verification_dir, f"{base_name}_threshold.png"))
    img_processor.save_image(results['annotated_image'],
                            os.path.join(verification_dir, f"{base_name}_annotated.png"))

    # Print SMI score
    print(f"SMI Score: {results['smi_score']:.4f} (1.0 = identical, 0.0 = completely different)")

    # Step 2: Labeling System - Label detected regions by threat level
    # NOTE(review): regions are scored on the *first* image — assumes image1
    # is the reference/original; confirm against callers.
    original_image = img_processor.load_image(image1_path)
    labeled_image, labeled_regions = labeler.label_regions(
        original_image, results['difference_image'], results['bounding_boxes'])

    # Save labeled image
    img_processor.save_image(labeled_image,
                            os.path.join(labeling_dir, f"{base_name}_labeled.png"))

    # Get threat summary and report it on stdout
    threat_summary = labeler.get_threat_summary(labeled_regions)
    print("\nThreat Summary:")
    print(f"Total regions detected: {threat_summary['total_regions']}")
    print(f"Threat counts: Low={threat_summary['threat_counts']['low']}, "
          f"Medium={threat_summary['threat_counts']['medium']}, "
          f"High={threat_summary['threat_counts']['high']}")
    if threat_summary['max_threat']:
        print(f"Maximum threat: {threat_summary['max_threat']['level'].upper()} "
              f"({threat_summary['max_threat']['percentage']:.1f}%)")
    print(f"Average difference: {threat_summary['average_difference']:.1f}%")

    # Step 3: Heatmap Visualization - Generate heatmaps for threat visualization
    # Generate standard heatmap
    heatmap = heatmap_gen.generate_threat_heatmap(original_image, labeled_regions)
    img_processor.save_image(heatmap,
                            os.path.join(heatmap_dir, f"{base_name}_heatmap.png"))

    # Generate multi-level heatmaps (one per threat level plus an overlay)
    multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

    # Save multi-level heatmaps
    for level, hmap in multi_heatmaps.items():
        if level != 'overlay':  # 'overlay' is already saved above
            img_processor.save_image(hmap,
                                    os.path.join(heatmap_dir, f"{base_name}_heatmap_{level}.png"))

    # Save side-by-side visualization of original vs. heatmap overlay
    heatmap_gen.save_heatmap_visualization(
        original_image, multi_heatmaps['overlay'],
        os.path.join(output_dir, f"{base_name}_visualization.png")
    )

    print(f"\nProcessing complete. Results saved to {output_dir}")
    return threat_summary
|
| 101 |
+
|
| 102 |
+
def main():
    """CLI entry point: pair up images in the input directory and run the
    detection pipeline (verification, labeling, heatmaps) on every pair,
    then print an overall threat summary."""
    args = parse_args()

    # Ensure output directory exists
    ensure_dir(args.output_dir)

    # Collect all image files; sorted so the consecutive-pairing fallback is
    # deterministic (os.listdir order is arbitrary and filesystem-dependent).
    image_files = sorted(f for f in os.listdir(args.input_dir)
                         if f.lower().endswith(('.png', '.jpg', '.jpeg')))

    # Group images for comparison (assuming pairs with _original and _modified suffixes)
    original_images = [f for f in image_files if '_original' in f]
    modified_images = [f for f in image_files if '_modified' in f]

    if not (original_images and modified_images):
        # No clear naming convention: process consecutive images as pairs (1&2, 3&4, ...)
        image_pairs = [(image_files[i], image_files[i + 1])
                       for i in range(0, len(image_files) - 1, 2)]
    else:
        # Match each original to the modified image sharing its stem.
        # Bug fix: the previous code tested `orig.replace('_original', '') in mod`,
        # which kept the extension (e.g. "cat.png") and so could never be a
        # substring of "cat_modified.png" — the matching branch always produced
        # zero pairs. Strip extensions before comparing.
        image_pairs = []
        for orig in original_images:
            stem = os.path.splitext(orig)[0].replace('_original', '')
            for mod in modified_images:
                if stem in os.path.splitext(mod)[0]:
                    image_pairs.append((orig, mod))
                    break

    # Process each image pair
    results = []
    for img1, img2 in image_pairs:
        img1_path = os.path.join(args.input_dir, img1)
        img2_path = os.path.join(args.input_dir, img2)

        print(f"\n{'='*50}")
        print(f"Processing pair: {img1} and {img2}")
        print(f"{'='*50}")

        result = process_image_pair(
            img1_path, img2_path, args.output_dir,
            args.model_path, args.threshold, args.min_area
        )
        results.append({
            'pair': (img1, img2),
            'summary': result
        })

    # Print overall summary
    print(f"\n{'='*50}")
    print(f"Overall Summary: Processed {len(image_pairs)} image pairs")
    print(f"{'='*50}")

    high_threat_pairs = [r for r in results
                         if r['summary']['threat_counts']['high'] > 0]
    print(f"Pairs with high threats: {len(high_threat_pairs)} / {len(results)}")

    if high_threat_pairs:
        print("\nHigh threat pairs:")
        for r in high_threat_pairs:
            print(f"- {r['pair'][0]} and {r['pair'][1]}: "
                  f"{r['summary']['threat_counts']['high']} high threat regions")
|
| 165 |
+
|
| 166 |
+
# Run the CLI pipeline only when this module is executed directly.
if __name__ == "__main__":
    main()
|
requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core dependencies
|
| 2 |
+
numpy>=1.19.0
|
| 3 |
+
opencv-python>=4.5.0
|
| 4 |
+
pillow>=8.0.0
|
| 5 |
+
scipy>=1.6.0
|
| 6 |
+
matplotlib>=3.3.0
|
| 7 |
+
|
| 8 |
+
# Deep learning frameworks
|
| 9 |
+
torch>=1.8.0
|
| 10 |
+
torchvision>=0.9.0
|
| 11 |
+
|
| 12 |
+
# Image processing
|
| 13 |
+
scikit-image>=0.18.0
|
| 14 |
+
|
| 15 |
+
# Visualization
|
| 16 |
+
seaborn>=0.11.0
|
| 17 |
+
streamlit>=1.20.0
|
| 18 |
+
|
| 19 |
+
# Nvidia AI model dependencies
|
| 20 |
+
cupy-cuda11x>=10.0.0 # Adjust based on your CUDA version
|
streamlit_app.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import tempfile
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import numpy as np
|
| 6 |
+
from comparison_interface import ComparisonInterface
|
| 7 |
+
from deepfake_detector import DeepfakeDetector
|
| 8 |
+
from image_processor import ImageProcessor
|
| 9 |
+
from labeling import ThreatLabeler
|
| 10 |
+
from heatmap_generator import HeatmapGenerator
|
| 11 |
+
|
| 12 |
+
# Set page configuration
|
| 13 |
+
st.set_page_config(page_title="Deepfake Detection Analysis", layout="wide")
|
| 14 |
+
|
| 15 |
+
# Initialize components
|
| 16 |
+
comparison = ComparisonInterface()
|
| 17 |
+
img_processor = ImageProcessor()
|
| 18 |
+
|
| 19 |
+
# Custom CSS to improve the UI
|
| 20 |
+
st.markdown("""
|
| 21 |
+
<style>
|
| 22 |
+
.main-header {
|
| 23 |
+
font-size: 2.5rem;
|
| 24 |
+
font-weight: bold;
|
| 25 |
+
color: #1E3A8A;
|
| 26 |
+
text-align: center;
|
| 27 |
+
margin-bottom: 1rem;
|
| 28 |
+
}
|
| 29 |
+
.sub-header {
|
| 30 |
+
font-size: 1.5rem;
|
| 31 |
+
font-weight: bold;
|
| 32 |
+
color: #2563EB;
|
| 33 |
+
margin-top: 1rem;
|
| 34 |
+
margin-bottom: 0.5rem;
|
| 35 |
+
}
|
| 36 |
+
.info-text {
|
| 37 |
+
font-size: 1rem;
|
| 38 |
+
color: #4B5563;
|
| 39 |
+
}
|
| 40 |
+
.stImage {
|
| 41 |
+
margin-top: 1rem;
|
| 42 |
+
margin-bottom: 1rem;
|
| 43 |
+
}
|
| 44 |
+
</style>
|
| 45 |
+
""", unsafe_allow_html=True)
|
| 46 |
+
|
| 47 |
+
# App header
|
| 48 |
+
st.markdown('<p class="main-header">Deepfake Detection Analysis</p>', unsafe_allow_html=True)
|
| 49 |
+
st.markdown('<p class="info-text">Upload original and modified images to analyze potential deepfakes</p>', unsafe_allow_html=True)
|
| 50 |
+
|
| 51 |
+
# Create columns for file uploaders
|
| 52 |
+
col1, col2 = st.columns(2)
|
| 53 |
+
|
| 54 |
+
with col1:
|
| 55 |
+
st.markdown('<p class="sub-header">Original Image</p>', unsafe_allow_html=True)
|
| 56 |
+
original_file = st.file_uploader("Upload the original image", type=["jpg", "jpeg", "png"])
|
| 57 |
+
|
| 58 |
+
with col2:
|
| 59 |
+
st.markdown('<p class="sub-header">Modified Image</p>', unsafe_allow_html=True)
|
| 60 |
+
modified_file = st.file_uploader("Upload the potentially modified image", type=["jpg", "jpeg", "png"])
|
| 61 |
+
|
| 62 |
+
# Parameters for analysis
|
| 63 |
+
st.markdown('<p class="sub-header">Analysis Parameters</p>', unsafe_allow_html=True)
|
| 64 |
+
col1, col2 = st.columns(2)
|
| 65 |
+
with col1:
|
| 66 |
+
threshold = st.slider("Difference Threshold", min_value=10, max_value=100, value=30,
|
| 67 |
+
help="Higher values detect only significant differences")
|
| 68 |
+
with col2:
|
| 69 |
+
min_area = st.slider("Minimum Detection Area", min_value=50, max_value=500, value=100,
|
| 70 |
+
help="Minimum area size to consider as a modified region")
|
| 71 |
+
|
| 72 |
+
# Process images when both are uploaded
if original_file and modified_file:
    # Create temporary directory for processing; it is cleaned up automatically
    with tempfile.TemporaryDirectory() as temp_dir:
        # Save uploaded files to temporary directory so the detector can read
        # them from disk paths
        original_path = os.path.join(temp_dir, "original.jpg")
        modified_path = os.path.join(temp_dir, "modified.jpg")

        with open(original_path, "wb") as f:
            f.write(original_file.getbuffer())
        with open(modified_path, "wb") as f:
            f.write(modified_file.getbuffer())

        # Create output directory
        # NOTE(review): this temp output_dir appears unused — it is shadowed
        # below by the permanent "comparison_output" directory; confirm and
        # consider removing.
        output_dir = os.path.join(temp_dir, "output")
        os.makedirs(output_dir, exist_ok=True)

        # Process images and generate visualization
        with st.spinner("Processing images and generating analysis..."):
            # Initialize components
            detector = DeepfakeDetector()
            labeler = ThreatLabeler()
            heatmap_gen = HeatmapGenerator()

            # Step 1: Verification Module - Process the image pair
            detection_results = detector.process_image_pair(original_path, modified_path, threshold, min_area)

            # Step 2: Labeling System - Label detected regions by threat level
            original_image = img_processor.load_image(original_path)
            modified_image = img_processor.load_image(modified_path)
            labeled_image, labeled_regions = labeler.label_regions(
                original_image, detection_results['difference_image'], detection_results['bounding_boxes'])

            # Get threat summary
            threat_summary = labeler.get_threat_summary(labeled_regions)

            # Step 3: Heatmap Visualization - Generate heatmaps for threat visualization
            heatmap_overlay = heatmap_gen.generate_threat_heatmap(original_image, labeled_regions)
            multi_heatmaps = heatmap_gen.generate_multi_level_heatmap(original_image, labeled_regions)

            # Combine all results for the comparison grid renderer
            all_results = {
                'original_image': original_image,
                'modified_image': modified_image,
                'difference_image': detection_results['difference_image'],
                'threshold_image': detection_results['threshold_image'],
                'annotated_image': detection_results['annotated_image'],
                'labeled_image': labeled_image,
                'heatmap_overlay': heatmap_overlay,
                'multi_heatmaps': multi_heatmaps,
                'threat_summary': threat_summary,
                'smi_score': detection_results['smi_score'],
                'bounding_boxes': detection_results['bounding_boxes']
            }

            # Create output directory in a permanent location (survives the
            # temporary directory's cleanup)
            output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "comparison_output")
            os.makedirs(output_dir, exist_ok=True)

            # Generate unique filename based on original image
            base_name = os.path.splitext(original_file.name)[0]
            output_path = os.path.join(output_dir, f"{base_name}_combined_overlay.png")

            # Create and save combined visualization with heatmaps
            from combined_visualization import CombinedVisualizer
            visualizer = CombinedVisualizer()
            combined_results = {
                'original_image': original_image,
                'difference_image': detection_results['difference_image'],
                'bounding_boxes': detection_results['bounding_boxes'],
                'multi_heatmaps': multi_heatmaps,
                'labeled_regions': labeled_regions
            }
            combined_path = visualizer.create_combined_visualization(
                combined_results, output_path,
                alpha_diff=0.4, alpha_low=0.3, alpha_medium=0.4
            )

            # Create and save comparison visualization
            grid_output_path = os.path.join(output_dir, f"{base_name}_comparison.png")
            comparison.create_comparison_grid(all_results, grid_output_path)

        # Display the comprehensive visualization
        st.markdown('<p class="sub-header">Comprehensive Analysis</p>', unsafe_allow_html=True)
        st.image(grid_output_path, use_container_width=True)

        # Display threat summary
        st.markdown('<p class="sub-header">Threat Summary</p>', unsafe_allow_html=True)
        st.markdown(f"**SMI Score:** {detection_results['smi_score']:.4f} (1.0 = identical, 0.0 = completely different)")
        st.markdown(f"**Total regions detected:** {threat_summary['total_regions']}")

        # Create columns for threat counts
        col1, col2, col3 = st.columns(3)
        with col1:
            st.markdown(f"**Low threats:** {threat_summary['threat_counts']['low']}")
        with col2:
            st.markdown(f"**Medium threats:** {threat_summary['threat_counts']['medium']}")
        with col3:
            st.markdown(f"**High threats:** {threat_summary['threat_counts']['high']}")

        # max_threat is None when no regions were detected
        if threat_summary['max_threat']:
            st.markdown(f"**Maximum threat:** {threat_summary['max_threat']['level'].upper()} ({threat_summary['max_threat']['percentage']:.1f}%)")

        st.markdown(f"**Average difference:** {threat_summary['average_difference']:.1f}%")
|
| 176 |
+
|
| 177 |
+
else:
    # Display instructions when images are not yet uploaded
    st.info("Please upload both original and modified images to begin analysis.")

    # Display a sample analysis image if a previous run left one on disk
    st.markdown('<p class="sub-header">Sample Analysis Output</p>', unsafe_allow_html=True)
    sample_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "comparison_output", "deepfake1_comparison.png")
    if os.path.exists(sample_path):
        st.image(sample_path, use_container_width=True)
        st.caption("Sample analysis showing all detection stages in a single comprehensive view")
    else:
        st.write("Sample image not available. Please upload images to see the analysis.")
|