Spaces:
Sleeping
Sleeping
AMontiB commited on
Commit ·
2e6d263
1
Parent(s): 94154c7
upload
Browse files- CFA.py +47 -51
- JPEG_Ghost.py +51 -56
- PRNU.py +99 -120
- README.md +1 -1
- app.py +201 -27
- requirements.txt +9 -53
- shadows.py +353 -0
CFA.py
CHANGED
|
@@ -122,54 +122,50 @@ def analyze_region(original_image: np.ndarray, box_coords: tuple):
|
|
| 122 |
print(f"4. Analysis complete in {time.time() - start_time:.2f} seconds.")
|
| 123 |
return fig
|
| 124 |
|
| 125 |
-
# --- Build the Gradio Interface
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
# --- Remove the launch() call ---
|
| 173 |
-
# if __name__ == "__main__":
|
| 174 |
-
# demo = create_ui()
|
| 175 |
-
# demo.launch()
|
|
|
|
| 122 |
print(f"4. Analysis complete in {time.time() - start_time:.2f} seconds.")
|
| 123 |
return fig
|
| 124 |
|
| 125 |
+
# --- Build the Gradio Interface using Blocks ---
|
| 126 |
+
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 127 |
+
# State variables store data (like the original image) between user interactions
|
| 128 |
+
original_image_state = gr.State()
|
| 129 |
+
box_coords_state = gr.State(value=(0, 0))
|
| 130 |
+
|
| 131 |
+
gr.Markdown("# 🖼️ Image Patch Analyzer")
|
| 132 |
+
gr.Markdown(
|
| 133 |
+
"**Instructions:**\n"
|
| 134 |
+
"1. **Upload** an image.\n"
|
| 135 |
+
"2. **Click** anywhere on the image to move the 128x128 selection box.\n"
|
| 136 |
+
"3. Press the **Analyze Region** button to start processing."
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
with gr.Row():
|
| 140 |
+
image_display = gr.Image(type="numpy", label="Selection Canvas")
|
| 141 |
+
output_plot = gr.Plot(label="Analysis Results")
|
| 142 |
+
|
| 143 |
+
analyze_button = gr.Button("Analyze Region", variant="primary")
|
| 144 |
+
|
| 145 |
+
# --- Wire up the event listeners ---
|
| 146 |
+
|
| 147 |
+
# 1. When a new image is uploaded, call on_upload_image
|
| 148 |
+
image_display.upload(
|
| 149 |
+
fn=on_upload_image,
|
| 150 |
+
inputs=[image_display],
|
| 151 |
+
outputs=[image_display, original_image_state, box_coords_state]
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
# 2. When the user clicks the image, call move_selection_box
|
| 155 |
+
image_display.select(
|
| 156 |
+
fn=move_selection_box,
|
| 157 |
+
inputs=[original_image_state],
|
| 158 |
+
outputs=[image_display, box_coords_state]
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
# 3. When the user clicks the analyze button, call analyze_region
|
| 162 |
+
analyze_button.click(
|
| 163 |
+
fn=analyze_region,
|
| 164 |
+
inputs=[original_image_state, box_coords_state],
|
| 165 |
+
outputs=[output_plot],
|
| 166 |
+
# Show a progress bar during analysis
|
| 167 |
+
show_progress="full"
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
# --- Launch the App ---
|
| 171 |
+
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
JPEG_Ghost.py
CHANGED
|
@@ -96,59 +96,54 @@ def run_analysis(original_image: np.ndarray, box_coords: tuple, qf1: int, qf2: i
|
|
| 96 |
|
| 97 |
return im_composite, fig
|
| 98 |
|
| 99 |
-
# --- Build the Gradio Interface
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
with gr.
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
# --- Remove the launch() call ---
|
| 152 |
-
# if __name__ == "__main__":
|
| 153 |
-
# demo = create_ui()
|
| 154 |
-
# demo.launch(debug=True)
|
|
|
|
| 96 |
|
| 97 |
return im_composite, fig
|
| 98 |
|
| 99 |
+
# --- Build the Gradio Interface ---
|
| 100 |
+
|
| 101 |
+
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 102 |
+
gr.Markdown("# 🕵️ JPEG Double Compression Analyzer")
|
| 103 |
+
gr.Markdown(
|
| 104 |
+
"**Instructions:**\n"
|
| 105 |
+
"1. **Upload** an image.\n"
|
| 106 |
+
"2. **Click** on the image to move the 256x256 selection box.\n"
|
| 107 |
+
"3. Press **Analyze Image** to process the selected region."
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
original_image_state = gr.State()
|
| 111 |
+
box_coords_state = gr.State()
|
| 112 |
+
|
| 113 |
+
with gr.Row():
|
| 114 |
+
with gr.Column(scale=1):
|
| 115 |
+
gr.Markdown("### 1. Inputs")
|
| 116 |
+
image_display = gr.Image(type="numpy", label="Upload Image & Click to Select")
|
| 117 |
+
qf1_slider = gr.Slider(minimum=1, maximum=100, value=70, step=1, label="QF1: Background Quality")
|
| 118 |
+
qf2_slider = gr.Slider(minimum=1, maximum=100, value=85, step=1, label="QF2: Final Composite Quality")
|
| 119 |
+
gr.Markdown("#### Analysis QF Range")
|
| 120 |
+
with gr.Row():
|
| 121 |
+
qf_start_slider = gr.Slider(minimum=50, maximum=100, value=50, step=5, label="Start")
|
| 122 |
+
qf_end_slider = gr.Slider(minimum=50, maximum=100, value=90, step=5, label="End")
|
| 123 |
+
analyze_button = gr.Button("Analyze Image", variant="primary")
|
| 124 |
+
|
| 125 |
+
with gr.Column(scale=2):
|
| 126 |
+
gr.Markdown("### 2. Results")
|
| 127 |
+
composite_image_display = gr.Image(type="numpy", label="Generated Composite Image")
|
| 128 |
+
difference_plot_display = gr.Plot(label="Difference Maps")
|
| 129 |
+
|
| 130 |
+
# Event Listeners
|
| 131 |
+
image_display.upload(
|
| 132 |
+
fn=on_upload_image,
|
| 133 |
+
inputs=[image_display],
|
| 134 |
+
outputs=[image_display, original_image_state, box_coords_state]
|
| 135 |
+
)
|
| 136 |
+
|
| 137 |
+
image_display.select(
|
| 138 |
+
fn=move_selection_box,
|
| 139 |
+
inputs=[original_image_state],
|
| 140 |
+
outputs=[image_display, box_coords_state]
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
analyze_button.click(
|
| 144 |
+
fn=run_analysis,
|
| 145 |
+
inputs=[original_image_state, box_coords_state, qf1_slider, qf2_slider, qf_start_slider, qf_end_slider],
|
| 146 |
+
outputs=[composite_image_display, difference_plot_display]
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
demo.launch(debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
PRNU.py
CHANGED
|
@@ -1,31 +1,19 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import numpy as np
|
| 3 |
import matplotlib.pyplot as plt
|
| 4 |
-
import imageio
|
| 5 |
import tempfile
|
| 6 |
import os
|
| 7 |
|
| 8 |
# --- Import your custom source files ---
|
| 9 |
-
#
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
import utils.src.maindir as md
|
| 14 |
-
except ImportError:
|
| 15 |
-
print("Warning: Could not import 'utils.src' modules.")
|
| 16 |
-
print("Please ensure the 'utils' directory is present in your repository.")
|
| 17 |
-
# Define dummy functions so the app can at least load
|
| 18 |
-
class DummyModule:
|
| 19 |
-
def __getattr__(self, name):
|
| 20 |
-
def dummy_func(*args, **kwargs):
|
| 21 |
-
raise ImportError(f"Module 'utils.src' not loaded. '{name}' is unavailable.")
|
| 22 |
-
return dummy_func
|
| 23 |
-
Fu = DummyModule()
|
| 24 |
-
Ft = DummyModule()
|
| 25 |
-
md = DummyModule()
|
| 26 |
-
|
| 27 |
|
| 28 |
# --- App Description ---
|
|
|
|
|
|
|
| 29 |
description = """
|
| 30 |
# 📸 PRNU-Based Image Forgery Detector
|
| 31 |
|
|
@@ -46,108 +34,99 @@ This tool analyzes an image to detect potential manipulations using Photo-Respon
|
|
| 46 |
"""
|
| 47 |
|
| 48 |
# --- Main Analysis Function ---
|
|
|
|
|
|
|
| 49 |
def analyze_image_forgery(fingerprint_file, input_image):
|
| 50 |
"""
|
| 51 |
Processes an image against a camera fingerprint to generate a PCE map.
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
raise gr.Error("Please upload an image to analyze.")
|
| 57 |
-
|
| 58 |
-
try:
|
| 59 |
-
# --- 1. Load Camera Fingerprint ---
|
| 60 |
-
print("Loading camera fingerprint...")
|
| 61 |
-
Fingerprint = np.genfromtxt(fingerprint_file.name)
|
| 62 |
-
print(f"Fingerprint loaded. Shape: {Fingerprint.shape}")
|
| 63 |
-
|
| 64 |
-
# --- 2. Save uploaded image to a temporary file ---
|
| 65 |
-
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as temp_img_file:
|
| 66 |
-
temp_img_path = temp_img_file.name
|
| 67 |
-
imageio.imwrite(temp_img_path, input_image)
|
| 68 |
-
|
| 69 |
-
# --- 3. Extract and filter PRNU noise from the image ---
|
| 70 |
-
print("Extracting noise from image...")
|
| 71 |
-
Noisex = Ft.NoiseExtractFromImage(temp_img_path, sigma=2.)
|
| 72 |
-
Noisex = Fu.WienerInDFT(Noisex, np.std(Noisex))
|
| 73 |
-
print(f"Noise extracted. Shape: {Noisex.shape}")
|
| 74 |
-
|
| 75 |
-
# Clean up the temporary image file
|
| 76 |
-
os.remove(temp_img_path)
|
| 77 |
-
|
| 78 |
-
# --- 4. Align Fingerprint and PRNU sizes by padding if necessary ---
|
| 79 |
-
if Noisex.shape != Fingerprint.shape:
|
| 80 |
-
print("Shapes do not match. Padding PRNU noise to match fingerprint size.")
|
| 81 |
-
Noisex_padded = np.zeros_like(Fingerprint)
|
| 82 |
-
h = min(Noisex.shape[0], Fingerprint.shape[0])
|
| 83 |
-
w = min(Noisex.shape[1], Fingerprint.shape[1])
|
| 84 |
-
Noisex_padded[:h, :w] = Noisex[:h, :w]
|
| 85 |
-
Noisex = Noisex_padded
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import numpy as np
|
| 3 |
import matplotlib.pyplot as plt
|
| 4 |
+
import imageio
|
| 5 |
import tempfile
|
| 6 |
import os
|
| 7 |
|
| 8 |
# --- Import your custom source files ---
|
| 9 |
+
# Make sure the 'src' folder is in the same directory as this notebook
|
| 10 |
+
import utils.src.Functions as Fu
|
| 11 |
+
import utils.src.Filter as Ft
|
| 12 |
+
import utils.src.maindir as md
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
# --- App Description ---
|
| 15 |
+
# A detailed description using Markdown. It explains what the tool does and how to use it.
|
| 16 |
+
# You can add images from the web using standard markdown syntax.
|
| 17 |
description = """
|
| 18 |
# 📸 PRNU-Based Image Forgery Detector
|
| 19 |
|
|
|
|
| 34 |
"""
|
| 35 |
|
| 36 |
# --- Main Analysis Function ---
|
| 37 |
+
# This function contains all the logic from your script.
|
| 38 |
+
# It takes a fingerprint file and an image array as input, and returns two plots.
|
| 39 |
def analyze_image_forgery(fingerprint_file, input_image):
|
| 40 |
"""
|
| 41 |
Processes an image against a camera fingerprint to generate a PCE map.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
fingerprint_file (gradio.File): The uploaded camera fingerprint .dat file.
|
| 45 |
+
input_image (np.array): The uploaded image as a NumPy array.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
+
Returns:
|
| 48 |
+
(matplotlib.figure, matplotlib.figure): A tuple containing the two output plots.
|
| 49 |
+
"""
|
| 50 |
+
# --- 1. Load Camera Fingerprint ---
|
| 51 |
+
print("Loading camera fingerprint...")
|
| 52 |
+
Fingerprint = np.genfromtxt(fingerprint_file.name)
|
| 53 |
+
print(f"Fingerprint loaded. Shape: {Fingerprint.shape}")
|
| 54 |
+
|
| 55 |
+
# --- 2. Save uploaded image to a temporary file ---
|
| 56 |
+
# The NoiseExtractFromImage function expects a file path, so we create one.
|
| 57 |
+
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as temp_img_file:
|
| 58 |
+
temp_img_path = temp_img_file.name
|
| 59 |
+
imageio.imwrite(temp_img_path, input_image)
|
| 60 |
+
|
| 61 |
+
# --- 3. Extract and filter PRNU noise from the image ---
|
| 62 |
+
print("Extracting noise from image...")
|
| 63 |
+
Noisex = Ft.NoiseExtractFromImage(temp_img_path, sigma=2.)
|
| 64 |
+
Noisex = Fu.WienerInDFT(Noisex, np.std(Noisex))
|
| 65 |
+
print(f"Noise extracted. Shape: {Noisex.shape}")
|
| 66 |
+
|
| 67 |
+
# Clean up the temporary image file
|
| 68 |
+
os.remove(temp_img_path)
|
| 69 |
+
|
| 70 |
+
# --- 4. Align Fingerprint and PRNU sizes by padding if necessary ---
|
| 71 |
+
if Noisex.shape != Fingerprint.shape:
|
| 72 |
+
print("Shapes do not match. Padding PRNU noise to match fingerprint size.")
|
| 73 |
+
Noisex_padded = np.zeros_like(Fingerprint)
|
| 74 |
+
h = min(Noisex.shape[0], Fingerprint.shape[0])
|
| 75 |
+
w = min(Noisex.shape[1], Fingerprint.shape[1])
|
| 76 |
+
Noisex_padded[:h, :w] = Noisex[:h, :w]
|
| 77 |
+
Noisex = Noisex_padded
|
| 78 |
+
|
| 79 |
+
# --- 5. Compute PCE Map in blocks ---
|
| 80 |
+
print("Computing PCE map...")
|
| 81 |
+
block_size = 64
|
| 82 |
+
blocks_x = np.arange(0, Noisex.shape[0], block_size)
|
| 83 |
+
blocks_y = np.arange(0, Noisex.shape[1], block_size)
|
| 84 |
+
PCE_map = np.zeros((len(blocks_x), len(blocks_y)))
|
| 85 |
+
|
| 86 |
+
for y_idx, y_start in enumerate(blocks_y):
|
| 87 |
+
for x_idx, x_start in enumerate(blocks_x):
|
| 88 |
+
block_Noisex = Noisex[x_start:x_start+block_size, y_start:y_start+block_size]
|
| 89 |
+
block_Fingerprint = Fingerprint[x_start:x_start+block_size, y_start:y_start+block_size]
|
| 90 |
+
|
| 91 |
+
# Skip if blocks are not of the expected size (can happen at edges)
|
| 92 |
+
if block_Noisex.shape != (block_size, block_size):
|
| 93 |
+
continue
|
| 94 |
+
|
| 95 |
+
C = Fu.crosscorr(block_Noisex, block_Fingerprint)
|
| 96 |
+
det, _ = md.PCE(C)
|
| 97 |
+
PCE_map[x_idx, y_idx] = det.get('PCE', 0) # Use .get for safety
|
| 98 |
+
|
| 99 |
+
print("PCE map computed successfully.")
|
| 100 |
+
|
| 101 |
+
# --- 6. Generate Output Plots ---
|
| 102 |
+
# Plot 1: PCE Map
|
| 103 |
+
fig1, ax1 = plt.subplots(figsize=(8, 6))
|
| 104 |
+
im = ax1.imshow(PCE_map, cmap='viridis')
|
| 105 |
+
ax1.set_title('Detection PCE-map')
|
| 106 |
+
fig1.colorbar(im, ax=ax1, label='PCE Value')
|
| 107 |
+
|
| 108 |
+
# Plot 2: Original Image
|
| 109 |
+
fig2, ax2 = plt.subplots(figsize=(8, 6))
|
| 110 |
+
ax2.imshow(input_image)
|
| 111 |
+
ax2.set_title('Analyzed Image')
|
| 112 |
+
ax2.axis('off')
|
| 113 |
+
|
| 114 |
+
return fig1, fig2
|
| 115 |
+
|
| 116 |
+
# --- Create and Launch the Gradio Interface ---
|
| 117 |
+
iface = gr.Interface(
|
| 118 |
+
fn=analyze_image_forgery,
|
| 119 |
+
inputs=[
|
| 120 |
+
gr.File(label="Upload Camera Fingerprint (.dat file)"),
|
| 121 |
+
gr.Image(type="numpy", label="Upload Image to Analyze")
|
| 122 |
+
],
|
| 123 |
+
outputs=[
|
| 124 |
+
gr.Plot(label="PCE Map"),
|
| 125 |
+
gr.Plot(label="Analyzed Image")
|
| 126 |
+
],
|
| 127 |
+
title="📸 PRNU-Based Image Forgery Detector",
|
| 128 |
+
description=description
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
# Launch the app locally
|
| 132 |
+
iface.launch()
|
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🕵️♂️
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: "4.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: "4.0.0"
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
app.py
CHANGED
|
@@ -1,31 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
-
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
|
| 3 |
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
# Import the UI-creation functions from your tool scripts
|
| 7 |
-
import CFA as CFA_tool
|
| 8 |
-
import JPEG_Ghost as JPEG_Ghost_tool
|
| 9 |
-
import PRNU as PRNU_tool
|
| 10 |
-
import shadow as shadows_tool
|
| 11 |
-
|
| 12 |
-
# Create the tabbed interface
|
| 13 |
-
demo = gr.TabbedInterface(
|
| 14 |
-
interface_list=[
|
| 15 |
-
CFA_tool.demo, # Use the Blocks object directly
|
| 16 |
-
JPEG_Ghost_tool.demo, # Use the Blocks object directly
|
| 17 |
-
PRNU_tool.iface, # Use the Interface object directly
|
| 18 |
-
shadows_tool.build_gradio_interface() # Call the function that returns the interface
|
| 19 |
-
],
|
| 20 |
-
tab_names=[
|
| 21 |
-
"🎨 CFA Analysis",
|
| 22 |
-
"👻 JPEG Ghost",
|
| 23 |
-
"📸 PRNU Analysis",
|
| 24 |
-
"☀️ Shadow Analysis"
|
| 25 |
-
],
|
| 26 |
-
title="Digital Image Forensics Toolkit 🕵️♂️"
|
| 27 |
-
)
|
| 28 |
-
|
| 29 |
-
# Launch the app
|
| 30 |
if __name__ == "__main__":
|
| 31 |
-
demo.launch()
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import numpy as np
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import tempfile
|
| 5 |
import os
|
|
|
|
| 6 |
|
| 7 |
+
# Import all the tool modules
|
| 8 |
+
from CFA import analyze_region, on_upload_image as cfa_upload, move_selection_box as cfa_move_box
|
| 9 |
+
from JPEG_Ghost import run_analysis, on_upload_image as jpeg_upload, move_selection_box as jpeg_move_box
|
| 10 |
+
from PRNU import analyze_image_forgery
|
| 11 |
+
from shadows import build_gradio_interface
|
| 12 |
+
|
| 13 |
+
# Configuration
|
| 14 |
+
BOX_SIZE_CFA = 128
|
| 15 |
+
BOX_SIZE_JPEG = 256
|
| 16 |
+
|
| 17 |
+
# Store state for each tool
|
| 18 |
+
class ToolState:
|
| 19 |
+
def __init__(self):
|
| 20 |
+
self.cfa_original_image = None
|
| 21 |
+
self.cfa_box_coords = (0, 0)
|
| 22 |
+
self.jpeg_original_image = None
|
| 23 |
+
self.jpeg_box_coords = (0, 0)
|
| 24 |
+
|
| 25 |
+
state = ToolState()
|
| 26 |
+
|
| 27 |
+
# CFA Analysis Functions
|
| 28 |
+
def cfa_interface():
|
| 29 |
+
with gr.Blocks() as interface:
|
| 30 |
+
gr.Markdown("# 🎨 Color Filter Array Analysis")
|
| 31 |
+
gr.Markdown("Analyzes artifacts introduced during the camera's raw image processing to detect spliced or copy-pasted regions.")
|
| 32 |
+
|
| 33 |
+
with gr.Row():
|
| 34 |
+
cfa_image_display = gr.Image(type="numpy", label="Upload Image & Click to Select 128x128 Region")
|
| 35 |
+
cfa_output_plot = gr.Plot(label="Analysis Results")
|
| 36 |
+
|
| 37 |
+
cfa_analyze_button = gr.Button("Analyze Region", variant="primary")
|
| 38 |
+
|
| 39 |
+
# Event handlers for CFA
|
| 40 |
+
def on_cfa_upload(image):
|
| 41 |
+
result = cfa_upload(image)
|
| 42 |
+
state.cfa_original_image = result[1]
|
| 43 |
+
state.cfa_box_coords = result[2]
|
| 44 |
+
return result[0]
|
| 45 |
+
|
| 46 |
+
def on_cfa_click(evt: gr.SelectData):
|
| 47 |
+
if state.cfa_original_image is not None:
|
| 48 |
+
result = cfa_move_box(state.cfa_original_image, evt)
|
| 49 |
+
state.cfa_box_coords = result[1]
|
| 50 |
+
return result[0]
|
| 51 |
+
return state.cfa_original_image
|
| 52 |
+
|
| 53 |
+
def on_cfa_analyze():
|
| 54 |
+
if state.cfa_original_image is not None:
|
| 55 |
+
return analyze_region(state.cfa_original_image, state.cfa_box_coords)
|
| 56 |
+
return None
|
| 57 |
+
|
| 58 |
+
cfa_image_display.upload(on_cfa_upload, inputs=[cfa_image_display], outputs=[cfa_image_display])
|
| 59 |
+
cfa_image_display.select(on_cfa_click, inputs=[], outputs=[cfa_image_display])
|
| 60 |
+
cfa_analyze_button.click(on_cfa_analyze, inputs=[], outputs=[cfa_output_plot])
|
| 61 |
+
|
| 62 |
+
return interface
|
| 63 |
+
|
| 64 |
+
# JPEG Ghost Analysis Functions
|
| 65 |
+
def jpeg_interface():
|
| 66 |
+
with gr.Blocks() as interface:
|
| 67 |
+
gr.Markdown("# 👻 JPEG Ghost Detection")
|
| 68 |
+
gr.Markdown("Detects forgeries by identifying regions with different JPEG compression levels using recompression analysis.")
|
| 69 |
+
|
| 70 |
+
with gr.Row():
|
| 71 |
+
with gr.Column(scale=1):
|
| 72 |
+
jpeg_image_display = gr.Image(type="numpy", label="Upload Image & Click to Select 256x256 Region")
|
| 73 |
+
qf1_slider = gr.Slider(minimum=1, maximum=100, value=70, step=1, label="QF1: Background Quality")
|
| 74 |
+
qf2_slider = gr.Slider(minimum=1, maximum=100, value=85, step=1, label="QF2: Final Composite Quality")
|
| 75 |
+
gr.Markdown("#### Analysis QF Range")
|
| 76 |
+
with gr.Row():
|
| 77 |
+
qf_start_slider = gr.Slider(minimum=50, maximum=100, value=50, step=5, label="Start")
|
| 78 |
+
qf_end_slider = gr.Slider(minimum=50, maximum=100, value=90, step=5, label="End")
|
| 79 |
+
jpeg_analyze_button = gr.Button("Analyze Image", variant="primary")
|
| 80 |
+
|
| 81 |
+
with gr.Column(scale=2):
|
| 82 |
+
jpeg_composite_display = gr.Image(type="numpy", label="Generated Composite Image")
|
| 83 |
+
jpeg_difference_plot = gr.Plot(label="Difference Maps")
|
| 84 |
+
|
| 85 |
+
# Event handlers for JPEG Ghost
|
| 86 |
+
def on_jpeg_upload(image):
|
| 87 |
+
result = jpeg_upload(image)
|
| 88 |
+
state.jpeg_original_image = result[1]
|
| 89 |
+
state.jpeg_box_coords = result[2]
|
| 90 |
+
return result[0]
|
| 91 |
+
|
| 92 |
+
def on_jpeg_click(evt: gr.SelectData):
|
| 93 |
+
if state.jpeg_original_image is not None:
|
| 94 |
+
result = jpeg_move_box(state.jpeg_original_image, evt)
|
| 95 |
+
state.jpeg_box_coords = result[1]
|
| 96 |
+
return result[0]
|
| 97 |
+
return state.jpeg_original_image
|
| 98 |
+
|
| 99 |
+
def on_jpeg_analyze(qf1, qf2, qf_start, qf_end):
|
| 100 |
+
if state.jpeg_original_image is not None:
|
| 101 |
+
result = run_analysis(state.jpeg_original_image, state.jpeg_box_coords, qf1, qf2, qf_start, qf_end)
|
| 102 |
+
return result
|
| 103 |
+
return None, None
|
| 104 |
+
|
| 105 |
+
jpeg_image_display.upload(on_jpeg_upload, inputs=[jpeg_image_display], outputs=[jpeg_image_display])
|
| 106 |
+
jpeg_image_display.select(on_jpeg_click, inputs=[], outputs=[jpeg_image_display])
|
| 107 |
+
jpeg_analyze_button.click(
|
| 108 |
+
on_jpeg_analyze,
|
| 109 |
+
inputs=[qf1_slider, qf2_slider, qf_start_slider, qf_end_slider],
|
| 110 |
+
outputs=[jpeg_composite_display, jpeg_difference_plot]
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
return interface
|
| 114 |
+
|
| 115 |
+
# PRNU Analysis Functions
|
| 116 |
+
def prnu_interface():
|
| 117 |
+
with gr.Blocks() as interface:
|
| 118 |
+
gr.Markdown("# 📸 PRNU-Based Image Forgery Detector")
|
| 119 |
+
gr.Markdown("""
|
| 120 |
+
Uses Photo-Response Non-Uniformity (PRNU) pattern to detect tampered regions.
|
| 121 |
+
**Requirements:** You need a camera fingerprint file (.dat format) for analysis.
|
| 122 |
+
""")
|
| 123 |
+
|
| 124 |
+
with gr.Row():
|
| 125 |
+
prnu_fingerprint = gr.File(label="Upload Camera Fingerprint (.dat file)")
|
| 126 |
+
prnu_image = gr.Image(type="numpy", label="Upload Image to Analyze")
|
| 127 |
+
|
| 128 |
+
prnu_analyze_button = gr.Button("Analyze Image", variant="primary")
|
| 129 |
+
|
| 130 |
+
with gr.Row():
|
| 131 |
+
prnu_pce_plot = gr.Plot(label="PCE Map")
|
| 132 |
+
prnu_image_plot = gr.Plot(label="Analyzed Image")
|
| 133 |
+
|
| 134 |
+
def on_prnu_analyze(fingerprint, image):
|
| 135 |
+
if fingerprint is None:
|
| 136 |
+
raise gr.Error("Please upload a camera fingerprint file!")
|
| 137 |
+
if image is None:
|
| 138 |
+
raise gr.Error("Please upload an image to analyze!")
|
| 139 |
+
return analyze_image_forgery(fingerprint, image)
|
| 140 |
+
|
| 141 |
+
prnu_analyze_button.click(
|
| 142 |
+
on_prnu_analyze,
|
| 143 |
+
inputs=[prnu_fingerprint, prnu_image],
|
| 144 |
+
outputs=[prnu_pce_plot, prnu_image_plot]
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
return interface
|
| 148 |
+
|
| 149 |
+
# Shadow Analysis Functions
|
| 150 |
+
def shadow_interface():
|
| 151 |
+
return build_gradio_interface()
|
| 152 |
+
|
| 153 |
+
# Main App
|
| 154 |
+
with gr.Blocks(theme=gr.themes.Soft(), title="Digital Image Forensics Toolkit") as demo:
|
| 155 |
+
gr.Markdown("""
|
| 156 |
+
# 🕵️♂️ Digital Image Forensics Toolkit
|
| 157 |
+
|
| 158 |
+
This toolkit provides multiple forensic algorithms to detect image manipulations and forgeries.
|
| 159 |
+
Select a tool from the dropdown below to begin analysis.
|
| 160 |
+
""")
|
| 161 |
+
|
| 162 |
+
with gr.Row():
|
| 163 |
+
tool_selector = gr.Dropdown(
|
| 164 |
+
choices=[
|
| 165 |
+
"🎨 Color Filter Array (CFA) Analysis",
|
| 166 |
+
"👻 JPEG Ghost Detection",
|
| 167 |
+
"📸 PRNU Analysis",
|
| 168 |
+
"☀️ Shadow Consistency Analysis"
|
| 169 |
+
],
|
| 170 |
+
label="Select Forensic Tool",
|
| 171 |
+
value="🎨 Color Filter Array (CFA) Analysis"
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
tool_output = gr.Tabs()
|
| 175 |
+
|
| 176 |
+
# Create all interfaces but only show the selected one
|
| 177 |
+
with tool_output:
|
| 178 |
+
with gr.TabItem("CFA Analysis") as cfa_tab:
|
| 179 |
+
cfa_interface()
|
| 180 |
+
with gr.TabItem("JPEG Ghost") as jpeg_tab:
|
| 181 |
+
jpeg_interface()
|
| 182 |
+
with gr.TabItem("PRNU Analysis") as prnu_tab:
|
| 183 |
+
prnu_interface()
|
| 184 |
+
with gr.TabItem("Shadow Analysis") as shadow_tab:
|
| 185 |
+
shadow_interface()
|
| 186 |
+
|
| 187 |
+
# Map tool selection to tabs
|
| 188 |
+
tool_map = {
|
| 189 |
+
"🎨 Color Filter Array (CFA) Analysis": 0,
|
| 190 |
+
"👻 JPEG Ghost Detection": 1,
|
| 191 |
+
"📸 PRNU Analysis": 2,
|
| 192 |
+
"☀️ Shadow Consistency Analysis": 3
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
def select_tool(tool_name):
|
| 196 |
+
return gr.Tabs(selected=tool_map.get(tool_name, 0))
|
| 197 |
+
|
| 198 |
+
tool_selector.change(
|
| 199 |
+
select_tool,
|
| 200 |
+
inputs=[tool_selector],
|
| 201 |
+
outputs=[tool_output]
|
| 202 |
+
)
|
| 203 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
if __name__ == "__main__":
|
| 205 |
+
demo.launch(share=True)
|
requirements.txt
CHANGED
|
@@ -1,53 +1,9 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
fonttools==4.55.0
|
| 11 |
-
imagecodecs==2023.3.16
|
| 12 |
-
imageio==2.35.1
|
| 13 |
-
importlib-metadata==8.5.0
|
| 14 |
-
importlib-resources==6.4.5
|
| 15 |
-
ipykernel==6.29.5
|
| 16 |
-
ipython==8.12.3
|
| 17 |
-
jedi==0.19.2
|
| 18 |
-
jupyter-client==8.6.3
|
| 19 |
-
jupyter-core==5.7.2
|
| 20 |
-
kiwisolver==1.4.7
|
| 21 |
-
lazy-loader==0.4
|
| 22 |
-
matplotlib==3.7.5
|
| 23 |
-
matplotlib-inline==0.1.7
|
| 24 |
-
nest-asyncio==1.6.0
|
| 25 |
-
networkx==3.1
|
| 26 |
-
numpy==1.24.4
|
| 27 |
-
opencv-python==4.10.0.84
|
| 28 |
-
packaging==24.2
|
| 29 |
-
parso==0.8.4
|
| 30 |
-
pickleshare==0.7.5
|
| 31 |
-
pillow==10.4.0
|
| 32 |
-
platformdirs==4.3.6
|
| 33 |
-
prompt-toolkit==3.0.48
|
| 34 |
-
psutil==6.1.0
|
| 35 |
-
pure-eval==0.2.3
|
| 36 |
-
pygments==2.18.0
|
| 37 |
-
pyparsing==3.1.4
|
| 38 |
-
python-dateutil==2.9.0.post0
|
| 39 |
-
PyWavelets==1.4.1
|
| 40 |
-
scikit-image==0.21.0
|
| 41 |
-
scipy==1.10.1
|
| 42 |
-
six==1.16.0
|
| 43 |
-
tornado==6.4.1
|
| 44 |
-
gradio==4.32.4
|
| 45 |
-
fastapi==0.104.1
|
| 46 |
-
uvicorn==0.24.0
|
| 47 |
-
numpy==1.24.4
|
| 48 |
-
opencv-python==4.10.0.84
|
| 49 |
-
pillow==10.4.0
|
| 50 |
-
scikit-image==0.21.0
|
| 51 |
-
scipy==1.10.1
|
| 52 |
-
matplotlib==3.7.5
|
| 53 |
-
imageio==2.35.1
|
|
|
|
| 1 |
+
gradio>=4.0.0
|
| 2 |
+
numpy
|
| 3 |
+
pillow
|
| 4 |
+
matplotlib
|
| 5 |
+
scipy
|
| 6 |
+
imageio
|
| 7 |
+
opencv-python
|
| 8 |
+
PyWavelets
|
| 9 |
+
scikit-image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
shadows.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Gradio app to replicate the interactive vanishing-point selection tool
|
| 3 |
+
from the supplied matplotlib script, implemented for gradio==3.50.2.
|
| 4 |
+
|
| 5 |
+
How it works (UI):
|
| 6 |
+
- Upload an image.
|
| 7 |
+
- Click "Start Yellow" or "Start Red" to enter a drawing mode for that line group.
|
| 8 |
+
- Click on the image to add points. Two consecutive clicks make a line.
|
| 9 |
+
- You can add as many lines as you want for each color.
|
| 10 |
+
- Press "Compute vanishing points" to run optimization (scipy.minimize) for
|
| 11 |
+
each color group and display the vanishing points and overlayed lines.
|
| 12 |
+
- Reset clears all state.
|
| 13 |
+
|
| 14 |
+
Requirements:
|
| 15 |
+
- gradio==3.50.2
|
| 16 |
+
- numpy
|
| 17 |
+
- scipy
|
| 18 |
+
- pillow
|
| 19 |
+
|
| 20 |
+
Run:
|
| 21 |
+
pip install gradio==3.50.2 numpy scipy pillow
|
| 22 |
+
python grad_io_gradio_app.py
|
| 23 |
+
|
| 24 |
+
Note: This implementation uses the Image.select event which behaves correctly
|
| 25 |
+
in gradio 3.50.2 (it provides pixel coordinates of the clicked point). If you
|
| 26 |
+
use a newer Gradio version, the event behavior might differ.
|
| 27 |
+
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
import io
|
| 31 |
+
import math
|
| 32 |
+
import numpy as np
|
| 33 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 34 |
+
import gradio as gr
|
| 35 |
+
from scipy.optimize import minimize
|
| 36 |
+
|
| 37 |
+
# ------------------------ Helper math functions ---------------------------
|
| 38 |
+
|
| 39 |
+
def build_line_from_points(p1, p2):
    """Compute homogeneous coefficients (A, B, C) of the line Ax + By + C = 0
    that passes through the two points *p1* and *p2*."""
    (x1, y1), (x2, y2) = p1, p2
    coeffs = (y1 - y2, x2 - x1, x1 * y2 - y1 * x2)
    return np.array(coeffs, dtype=float)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def distance_point_to_line(pt, line):
    """Return the perpendicular distance from point *pt* = (x, y) to the
    line given by coefficients (a, b, c) of ax + by + c = 0."""
    a, b, c = line
    x, y = pt
    return abs(a * x + b * y + c) / math.hypot(a, b)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def total_distances(x, lines, noise_lines):
    """Objective for the vanishing-point search: sum of perpendicular
    distances from candidate point *x* to every line in *lines* and in
    *noise_lines*."""
    regular = sum(distance_point_to_line(x, ln) for ln in lines)
    noisy = sum(distance_point_to_line(x, ln) for ln in noise_lines)
    return regular + noisy
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def add_noise_lines_for_line(p1, p2, n=4, sigma=1.0):
    """Return *n* "noise" lines obtained by adding Gaussian jitter (std
    *sigma*, in pixels) to both endpoints of the segment p1-p2.  Used to
    soften the optimization objective around each user-drawn line."""
    def jittered(p):
        return (p[0] + np.random.normal(0, sigma),
                p[1] + np.random.normal(0, sigma))

    return [build_line_from_points(jittered(p1), jittered(p2))
            for _ in range(n)]
|
| 74 |
+
|
| 75 |
+
# ------------------------- Drawing utilities ------------------------------
|
| 76 |
+
|
| 77 |
+
def draw_overlay(base_pil, yellow_lines, red_lines, yellow_points, red_points, vps=None):
    """Return a copy of *base_pil* with overlays drawn: full-extent lines,
    user-clicked endpoints, and (optionally) vanishing points.

    Parameters
    ----------
    yellow_lines, red_lines : lists of (A, B, C) line coefficients
    yellow_points, red_points : lists of ((x1, y1), (x2, y2)) endpoint pairs,
        parallel to the corresponding *_lines lists
    vps : optional dict with 'yellow' and 'red' keys mapping to (x, y)
        vanishing-point coordinates (or None when not computed)
    """
    img = base_pil.copy().convert("RGBA")
    draw = ImageDraw.Draw(img)

    YELLOW = (255, 215, 0, 255)
    RED = (255, 64, 64, 255)

    def draw_point(pt, color, r=4):
        # Filled circle of radius r centred on pt.
        x, y = pt
        draw.ellipse((x - r, y - r, x + r, y + r), fill=color, outline=color)

    # NOTE(review): the previous version also defined an inner
    # draw_line_by_points helper (with a dashed mode) that was never
    # called anywhere; it has been removed as dead code.

    # Each stored line is projected across the whole image; its two
    # clicked endpoints are marked with small dots.
    for (p1, p2), L in zip(yellow_points, yellow_lines):
        draw_line_segment_from_line(L, img.size, color=(255, 215, 0, 200), draw=draw)
        draw_point(p1, YELLOW)
        draw_point(p2, YELLOW)

    for (p1, p2), L in zip(red_points, red_lines):
        draw_line_segment_from_line(L, img.size, color=(255, 64, 64, 200), draw=draw)
        draw_point(p1, RED)
        draw_point(p2, RED)

    # Vanishing points (if present) are drawn slightly larger.
    if vps is not None:
        if vps.get("yellow") is not None:
            draw_point(vps["yellow"], YELLOW, r=6)
        if vps.get("red") is not None:
            draw_point(vps["red"], RED, r=6)

    return img.convert("RGB")
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def draw_line_segment_from_line(line, image_size, draw=None, color=(255, 255, 0, 255)):
    """Clip the infinite line (a, b, c) to (roughly) the image rectangle
    and, when *draw* is an ImageDraw instance, render it as a 2-px segment.
    Does nothing when the clipped segment has fewer than two usable points
    or *draw* is None."""
    W, H = image_size
    a, b, c = line

    candidates = []
    if abs(b) > 1e-9:
        # Intersections with the vertical edges x = 0 and x = W.
        candidates.append((0, -(a * 0 + c) / b))
        candidates.append((W, -(a * W + c) / b))
    if abs(a) > 1e-9:
        # Intersections with the horizontal edges y = 0 and y = H.
        candidates.append((-(b * 0 + c) / a, 0))
        candidates.append((-(b * H + c) / a, H))

    # Allow a 10% margin around the frame so near-boundary lines still draw.
    inside = [(x, y) for (x, y) in candidates
              if -W * 0.1 <= x <= W * 1.1 and -H * 0.1 <= y <= H * 1.1]

    if len(inside) >= 2 and draw is not None:
        # Connect the two lexicographically extreme intersection points.
        inside.sort(key=lambda p: (p[0], p[1]))
        pA, pB = inside[0], inside[-1]
        draw.line((pA[0], pA[1], pB[0], pB[1]), fill=color, width=2)
|
| 166 |
+
|
| 167 |
+
# ------------------------- Gradio app callbacks ---------------------------
|
| 168 |
+
|
| 169 |
+
# We'll store states in gr.State objects:
|
| 170 |
+
# - current_mode: None | 'yellow' | 'red'
|
| 171 |
+
# - current_points: list of pending points (len 0 or 1 waiting for second click)
|
| 172 |
+
# - yellow_lines: list of (A,B,C)
|
| 173 |
+
# - red_lines: list of (A,B,C)
|
| 174 |
+
# - yellow_points_pairs: list of ((p1,p2))
|
| 175 |
+
# - red_points_pairs: list of ((p1,p2))
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def init_states():
    """Return a fresh state tuple:
    (mode, pending_points, yellow_lines, red_lines, yellow_pairs, red_pairs)."""
    mode = None
    return (mode, [], [], [], [], [])
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def on_mode_change(mode, image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Switch the drawing mode to 'yellow', 'red' or None (stop drawing).

    The image and all accumulated lines are passed through untouched; any
    half-finished pending point is discarded so the next click starts a
    new line.
    """
    cleared_pending = []
    return (image, mode, cleared_pending, y_lines, r_lines, y_pairs, r_pairs)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def on_image_select(sel: gr.SelectData, image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Handle a click on the image.

    ``sel.index`` gives the clicked pixel (x, y).  Points accumulate in
    *current_points*; once two points are pending (and a colour mode is
    active) they become one line for that colour group and the pending
    buffer is cleared, so clicks pair up as (1,2), (3,4), (5,6), ...

    Returns the redrawn overlay image plus all updated state values.
    """
    if sel is None:
        return image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs

    idx = getattr(sel, "index", None)
    if idx is None:
        # Some gradio versions expose the coordinates via .data or .value.
        idx = getattr(sel, "data", None) or getattr(sel, "value", None)
        if not idx:
            return image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs

    x, y = int(idx[0]), int(idx[1])

    # Copy state lists before mutating (gradio state must not be aliased).
    current_points = list(current_points) if current_points is not None else []
    current_points.append((x, y))

    # Two pending points + an active colour mode => one finished line.
    if len(current_points) >= 2 and current_mode in ("yellow", "red"):
        p1 = current_points[-2]
        p2 = current_points[-1]
        L = build_line_from_points(p1, p2)
        if current_mode == "yellow":
            y_lines = list(y_lines) if y_lines is not None else []
            y_pairs = list(y_pairs) if y_pairs is not None else []
            y_lines.append(L)
            y_pairs.append((p1, p2))
        else:
            r_lines = list(r_lines) if r_lines is not None else []
            r_pairs = list(r_pairs) if r_pairs is not None else []
            r_lines.append(L)
            r_pairs.append((p1, p2))
        # Bug fix: clear the pending buffer after a line is formed.
        # Previously the buffer kept growing, so every click after the
        # second chained a new line with the *previous* point instead of
        # starting an independent two-click line.
        current_points = []

    # Redraw overlays on top of the (possibly numpy) input image.
    base_pil = Image.fromarray(image) if not isinstance(image, Image.Image) else image
    out = draw_overlay(base_pil, y_lines or [], r_lines or [], y_pairs or [], r_pairs or [], vps=None)

    return out, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def _estimate_group_vp(lines, pairs, img_size):
    """Estimate one vanishing point from a group of >= 2 lines.

    Initial guess: mean of all pairwise line intersections (falls back to
    the image centre when no pair can be solved, e.g. all lines parallel).
    The guess is refined with Powell minimisation of the summed
    point-to-line distances over the lines plus jittered "noise" copies of
    each user-drawn segment.  Returns the (x, y) estimate as floats.
    """
    lines_arr = np.array(lines)

    inters = []
    for i in range(len(lines_arr) - 1):
        for j in range(i + 1, len(lines_arr)):
            A = np.array([[lines_arr[i][0], lines_arr[i][1]],
                          [lines_arr[j][0], lines_arr[j][1]]])
            rhs = -np.array([lines_arr[i][2], lines_arr[j][2]])
            try:
                inters.append(np.linalg.solve(A, rhs))
            except Exception:
                # Singular system: the two lines are (near-)parallel.
                pass

    if inters:
        p0 = np.mean(inters, axis=0)
    else:
        # Fallback initial guess: centre of the image.
        p0 = np.array([img_size[0] / 2, img_size[1] / 2])

    noise = []
    for (p1, p2) in pairs:
        noise += add_noise_lines_for_line(p1, p2, n=4, sigma=2.0)

    res = minimize(lambda x: total_distances(x, lines_arr, noise), p0, method='Powell')
    return (float(res.x[0]), float(res.x[1]))


def compute_vanishing_points(image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Compute vanishing points for both colour groups, draw them, and
    return the annotated image plus all states unchanged.

    A group contributes a vanishing point only when it has more than one
    line; the per-group estimation lives in ``_estimate_group_vp`` (the
    previous version duplicated ~40 lines for yellow and red).
    """
    img_pil = Image.fromarray(image) if not isinstance(image, Image.Image) else image
    size = (img_pil.width, img_pil.height)

    vps = {"yellow": None, "red": None}
    if y_lines and len(y_lines) > 1:
        vps["yellow"] = _estimate_group_vp(y_lines, y_pairs, size)
    if r_lines and len(r_lines) > 1:
        vps["red"] = _estimate_group_vp(r_lines, r_pairs, size)

    out = draw_overlay(img_pil, y_lines or [], r_lines or [], y_pairs or [], r_pairs or [], vps=vps)
    return out, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def reset_all(image, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs):
    """Discard every drawn line/point and return the clean base image with
    all states re-initialised."""
    if isinstance(image, Image.Image):
        base_pil = image
    else:
        base_pil = Image.fromarray(image)
    return base_pil, None, [], [], [], [], []
|
| 305 |
+
|
| 306 |
+
# ------------------------------ Build Blocks ------------------------------
|
| 307 |
+
|
| 308 |
+
def build_gradio_interface():
    """Assemble and return the Gradio Blocks UI for the vanishing-point picker.

    Layout: one row with the clickable image on the left and the control
    buttons in a column on the right.  All mutable data lives in gr.State
    components so each browser session is independent.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# grad.io — Vanishing-point picker (Gradio 3.50.2 sample)")
        with gr.Row():
            # The image doubles as the click target for adding points.
            img_in = gr.Image(label="Upload image and then click to add points", type="numpy", interactive=True, height=800)
            with gr.Column():
                start_y = gr.Button("Start Yellow")
                start_r = gr.Button("Start Red")
                none_btn = gr.Button("Stop Drawing")
                compute_btn = gr.Button("Compute vanishing points")
                reset_btn = gr.Button("Reset")
        gr.Markdown("\nClick the image to add points. Two points => one line. Add at least 2 lines per group to compute a vanishing point.")

        # Per-session states (see the callback module comments for meaning):
        # current_mode: None | 'yellow' | 'red'; current_points: pending clicks;
        # *_lines: (A,B,C) coefficients; *_pairs: endpoint pairs per line.
        current_mode = gr.State(None)
        current_points = gr.State([])
        y_lines = gr.State([])
        r_lines = gr.State([])
        y_pairs = gr.State([])
        r_pairs = gr.State([])

        # Mode buttons inject their mode as a constant gr.State first input.
        start_y.click(on_mode_change, inputs=[gr.State("yellow"), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])
        start_r.click(on_mode_change, inputs=[gr.State("red"), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])
        none_btn.click(on_mode_change, inputs=[gr.State(None), img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                       outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        # Clicking the image adds a point (Image.select supplies pixel coords).
        img_in.select(on_image_select, inputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                      outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        compute_btn.click(compute_vanishing_points, inputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                          outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

        reset_btn.click(reset_all, inputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs],
                        outputs=[img_in, current_mode, current_points, y_lines, r_lines, y_pairs, r_pairs])

    return demo
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
if __name__ == '__main__':
    # Build the UI and serve it; queue() enables request queueing so the
    # Powell optimization in compute_vanishing_points doesn't block other users.
    demo = build_gradio_interface()
    demo.queue()
    demo.launch()
|