# non-ai-forensics-tools / JPEG_Ghost.py
# Author: AMontiB
# Commit: f9e90ec ("update")
import gradio as gr
import numpy as np
import imageio.v2 as imageio
import matplotlib.pyplot as plt
import os
import tempfile
from PIL import Image, ImageDraw
# --- Configuration ---
# Side length, in pixels, of the square selection box used for both the UI
# overlay and the patch that gets spliced back into the composite image.
BOX_SIZE = 256
# --- Core Analysis & Helper Functions ---
def draw_box_on_image(image: np.ndarray, box_coords: tuple, color="red", width=3) -> np.ndarray:
    """Return a copy of *image* with a BOX_SIZE square outline drawn on it.

    Args:
        image: Image as a NumPy array (converted via PIL, so uint8 expected).
        box_coords: (x, y) of the box's top-left corner.
        color: Outline color understood by PIL.
        width: Outline stroke width in pixels.

    Returns:
        A new NumPy array; the input array is not modified.
    """
    left, top = box_coords
    canvas = Image.fromarray(image)
    ImageDraw.Draw(canvas).rectangle(
        (left, top, left + BOX_SIZE, top + BOX_SIZE),
        outline=color,
        width=width,
    )
    return np.array(canvas)
def diff_maps(im: np.ndarray, qf_range: np.ndarray, temp_dir: str) -> np.ndarray:
    """Recompress *im* at each quality factor and return per-pixel squared errors.

    Args:
        im: Input image; cast to uint8 if it is not already.
        qf_range: Sequence of JPEG quality factors to evaluate.
        temp_dir: Directory that holds the temporary recompressed JPEG.

    Returns:
        float32 array of shape (H, W, len(qf_range)) where slice i is the
        squared difference between *im* and its QF=qf_range[i] recompression
        (averaged over color channels when the image has any).
    """
    if im.dtype != np.uint8:
        im = im.astype(np.uint8)
    height, width = im.shape[0], im.shape[1]
    out = np.zeros((height, width, len(qf_range)), dtype=np.float32)
    scratch_path = os.path.join(temp_dir, 'temp_recompress.jpg')
    # Hoist the float conversion of the reference image out of the loop.
    reference = im.astype(np.float32)
    for idx, quality in enumerate(qf_range):
        imageio.imwrite(scratch_path, im, format='JPEG', quality=int(quality))
        recompressed = imageio.imread(scratch_path).astype(np.float32)
        sq_err = (reference - recompressed) ** 2
        # Collapse color channels into a single difference map.
        out[:, :, idx] = sq_err if sq_err.ndim == 2 else np.mean(sq_err, axis=2)
    return out
def on_upload_image(image: np.ndarray) -> tuple:
    """Handle a fresh upload: place the selection box at the top-left corner.

    Returns:
        A 3-tuple of (image annotated with the box, the pristine original
        image for later analysis, the initial (x, y) box coordinates).
    """
    start_coords = (0, 0)
    annotated = draw_box_on_image(image, start_coords)
    return annotated, image, start_coords
def move_selection_box(evt: gr.SelectData, original_image: np.ndarray) -> tuple:
    """Re-center the selection box on the user's click, clamped to the image.

    Args:
        evt: Gradio select event; ``evt.index`` is the clicked (x, y) pixel.
        original_image: The pristine uploaded image (no box drawn on it).

    Returns:
        (image with the box drawn, new (x, y) top-left box coordinates).
    """
    # Center the box on the click point.
    x = evt.index[0] - BOX_SIZE // 2
    y = evt.index[1] - BOX_SIZE // 2
    # shape[:2] works for both grayscale (H, W) and color (H, W, C) arrays;
    # the previous three-way unpack raised ValueError on 2-D input.
    img_h, img_w = original_image.shape[:2]
    # Clamp so the box stays inside the image bounds.
    x = max(0, min(x, img_w - BOX_SIZE))
    y = max(0, min(y, img_h - BOX_SIZE))
    new_coords = (int(x), int(y))
    image_with_box = draw_box_on_image(original_image, new_coords)
    return image_with_box, new_coords
def run_analysis(original_image: np.ndarray, box_coords: tuple, qf1: int, qf2: int, qf_start: int, qf_end: int):
    """Build a simulated JPEG-ghost composite and plot its difference maps.

    The background is compressed at QF1, the selected patch is pasted back at
    full quality, and the whole composite is saved at QF2. The composite is
    then recompressed across [qf_start, qf_end] in steps of 5 to reveal the
    ghost of the patch's different compression history.

    Args:
        original_image: Uploaded image as a NumPy array.
        box_coords: (x, y) top-left corner of the selected patch.
        qf1: JPEG quality used for the background.
        qf2: JPEG quality used for the final composite.
        qf_start: Inclusive lower bound of the analysis QF range.
        qf_end: Inclusive upper bound of the analysis QF range.

    Returns:
        (composite image array, matplotlib Figure of the difference maps).

    Raises:
        gr.Error: If no image/selection is present or the QF range is invalid.
    """
    if original_image is None:
        raise gr.Error("Please upload an image first.")
    if box_coords is None:
        # State is only populated by the upload handler; guard against an
        # unset selection instead of crashing on tuple unpacking below.
        raise gr.Error("Please upload an image and select a region first.")
    if qf_start >= qf_end:
        raise gr.Error("Analysis QF Start must be less than QF End.")
    with tempfile.TemporaryDirectory() as temp_dir:
        x, y = box_coords
        xmin, ymin, xmax, ymax = x, y, x + BOX_SIZE, y + BOX_SIZE
        path_qf1 = os.path.join(temp_dir, 'temp1.jpg')
        path_composite = os.path.join(temp_dir, 'composite.jpg')
        # Step 1: compress the whole image at QF1 (the "background" quality).
        imageio.imwrite(path_qf1, original_image, quality=int(qf1))
        # np.array(...) guarantees a writable copy: imageio can hand back a
        # read-only array, which would break the patch assignment below.
        im_low_q = np.array(imageio.imread(path_qf1))
        # Step 2: paste the uncompressed patch over the low-quality background.
        im_low_q[ymin:ymax, xmin:xmax] = original_image[ymin:ymax, xmin:xmax]
        # Step 3: save the composite at QF2 — the patch now carries a hidden
        # compression history that differs from its surroundings.
        imageio.imwrite(path_composite, im_low_q, quality=int(qf2))
        im_composite = imageio.imread(path_composite)
        qf_values = np.arange(int(qf_start), int(qf_end) + 1, 5)
        if len(qf_values) == 0:
            raise gr.Error("The selected QF range is empty.")
        diffs = diff_maps(im_composite, qf_values, temp_dir)
        diffs = np.clip(diffs, 0, 255)
        num_plots = diffs.shape[2]
        cols = 4
        rows = int(np.ceil(num_plots / cols))
        fig, axes = plt.subplots(rows, cols, figsize=(16, 4 * rows), squeeze=False)
        fig.suptitle('Difference Images for Different Recompression Quality Factors', fontsize=16)
        axes = axes.flatten()
        # Shared display ceiling (hoisted out of the loop) so the maps are
        # visually comparable across quality factors.
        vmax = np.percentile(diffs, 99)
        for i in range(num_plots):
            axes[i].imshow(diffs[:, :, i], cmap='gray', vmin=0, vmax=vmax)
            axes[i].set_title(f'QF = {qf_values[i]}')
            axes[i].axis('off')
        # Hide any unused subplot cells in the grid.
        for i in range(num_plots, len(axes)):
            axes[i].axis('off')
        # Operate on the returned figure object instead of pyplot's implicit
        # "current figure" global state.
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        return im_composite, fig
def build_demo():
    """Construct and return the Gradio Blocks UI for JPEG Ghost detection.

    Layout: an input column (image + quality sliders + analyze button) and a
    results column (composite image + difference-map plot). Two ``gr.State``
    holders keep the pristine upload and the current box coordinates so the
    displayed (annotated) image never contaminates the analysis.
    """
    # --- Build the Gradio Interface ---
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 👻 JPEG Ghost Detection")
        # NOTE: "SIMULATES" fixed from "SIMULATE" (user-facing grammar bug).
        gr.Markdown(
            "Detects forgeries by identifying regions within an image that were compressed with different **JPEG Quality Factors (QF)**. When an area from a different JPEG image is spliced in, it often carries the 'ghost' of its original compression level, which this tool can reveal. NOTE: This tool AUTOMATICALLY SIMULATES THE JPEG GHOST BY RECOMPRESSING THE SELECTED AREA. \n"
            "\n"
            "## Instructions:\n"
            "1. **Upload** an image.\n"
            "2. **Click** on the image to move the 256x256 selection box.\n"
            "3. Press **Analyze Image** to process the selected region."
        )
        # Pristine upload (no box overlay) and current box top-left corner.
        original_image_state = gr.State()
        box_coords_state = gr.State()
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 1. Inputs")
                image_display = gr.Image(type="numpy", label="Upload Image & Click to Select", interactive=True)
                qf1_slider = gr.Slider(minimum=1, maximum=100, value=70, step=1, label="QF1: Background Quality")
                qf2_slider = gr.Slider(minimum=1, maximum=100, value=85, step=1, label="QF2: Final Composite Quality")
                gr.Markdown("#### Analysis QF Range")
                with gr.Row():
                    qf_start_slider = gr.Slider(minimum=50, maximum=100, value=50, step=5, label="Start")
                    qf_end_slider = gr.Slider(minimum=50, maximum=100, value=90, step=5, label="End")
                analyze_button = gr.Button("Analyze Image", variant="primary")
            with gr.Column(scale=2):
                gr.Markdown("### 2. Results")
                composite_image_display = gr.Image(type="numpy", label="Generated Composite Image")
                difference_plot_display = gr.Plot(label="Difference Maps")
        # Event Listeners
        # Upload: stash the pristine image and draw the initial box at (0, 0).
        image_display.upload(
            fn=on_upload_image,
            inputs=[image_display],
            outputs=[image_display, original_image_state, box_coords_state]
        )
        # Click: re-center the box; redraw from the pristine state so box
        # outlines never accumulate on the displayed image.
        image_display.select(
            fn=move_selection_box,
            inputs=[original_image_state],
            outputs=[image_display, box_coords_state]
        )
        # Analyze: build the composite and render the difference maps.
        analyze_button.click(
            fn=run_analysis,
            inputs=[original_image_state, box_coords_state, qf1_slider, qf2_slider, qf_start_slider, qf_end_slider],
            outputs=[composite_image_display, difference_plot_display]
        )
    return demo
if __name__ == "__main__":
    # Build the UI and serve it with debug logging/reload enabled.
    build_demo().launch(debug=True)