# Image-Forensics-Detect — branches/edge_branch.py
# Uploaded by dk2430098 via huggingface_hub (commit 928b74f, verified)
"""
branches/edge_branch.py
------------------------
Branch 2: Edge Analysis Branch
STATUS: COMPLETE β€” no training required (classical CV)
Detects structural and edge-level forensic artifacts:
- Unnatural sharpness / over-smoothing (GAN hallucinations)
- Broken contour continuity (AI composition artifacts)
- Abnormal gradient distribution statistics
Research background:
AI-generated images often show unnatural edge sharpness,
lack of micro-texture at boundaries, or inconsistent edge
density across image regions.
Output:
{
"prob_fake" : float in [0, 1],
"confidence" : float in [0, 1],
"edge_map" : np.ndarray (H, W) β€” edge magnitude for visualization
}
"""
import numpy as np
import cv2
from scipy.stats import entropy, kurtosis
from utils.image_utils import to_uint8, to_grayscale
# ─────────────────────────────────────────────────────────────────
# Internal Helpers
# ─────────────────────────────────────────────────────────────────
def _sobel_edge_map(gray_u8: np.ndarray) -> np.ndarray:
    """
    First-derivative (Sobel) gradient magnitude, min-max scaled to [0, 1].

    Args:
        gray_u8 : (H, W) uint8 grayscale image.

    Returns:
        (H, W) float32 edge-magnitude map in [0, 1].
    """
    dx = cv2.Sobel(gray_u8, cv2.CV_64F, 1, 0, ksize=3)
    dy = cv2.Sobel(gray_u8, cv2.CV_64F, 0, 1, ksize=3)
    magnitude = np.sqrt(dx**2 + dy**2)
    magnitude -= magnitude.min()
    peak = magnitude.max()
    if peak > 0:
        # Guard: a perfectly flat image would otherwise divide by zero.
        magnitude /= peak
    return magnitude.astype(np.float32)
def _laplacian_edge_map(gray_u8: np.ndarray) -> np.ndarray:
    """
    Second-derivative (Laplacian) edge response, min-max scaled to [0, 1].

    Args:
        gray_u8 : (H, W) uint8 grayscale image.

    Returns:
        (H, W) float32 edge map in [0, 1].
    """
    response = np.abs(cv2.Laplacian(gray_u8, cv2.CV_64F))
    response -= response.min()
    peak = response.max()
    if peak > 0:
        # Guard: a perfectly flat image would otherwise divide by zero.
        response /= peak
    return response.astype(np.float32)
def _edge_density_score(edge_map: np.ndarray) -> float:
"""
Edge density = fraction of strong-edge pixels.
AI images can be over-sharpened (too dense) or hallucinated (sparse).
Returns anomaly score in [0, 1].
"""
threshold = 0.15
density = float(np.mean(edge_map > threshold))
# Real images: ~0.05–0.20 edge density
# Over-sharpened AI: > 0.30, Over-smooth: < 0.03
if density > 0.30:
score = min((density - 0.30) / 0.25, 1.0)
elif density < 0.03:
score = min((0.03 - density) / 0.03, 1.0) * 0.5
else:
score = 0.0
return float(score)
def _edge_sharpness_score(edge_map: np.ndarray) -> float:
"""
Mean edge brightness as a sharpness indicator.
Unnatural hyper-sharpness in AI images.
Returns score in [0, 1].
"""
mean_edge = float(np.mean(edge_map))
# Real camera images: 0.05–0.15 avg edge strength
score = np.clip((mean_edge - 0.12) / 0.18, 0.0, 1.0)
return float(score)
def _gradient_distribution_score(gray: np.ndarray) -> float:
    """
    Analyze the distribution of gradient magnitudes.

    Real images have a more natural (heavy-tailed) gradient histogram;
    AI images can have an irregular, more uniform gradient distribution.
    Uses entropy and kurtosis of the gradient-magnitude histogram.

    Args:
        gray : (H, W) float grayscale image in [0, 1].

    Returns:
        Score in [0, 1] (higher = more suspicious).
    """
    gray_u8 = (gray * 255).astype(np.uint8)
    gx = cv2.Sobel(gray_u8, cv2.CV_64F, 1, 0, ksize=3).ravel()
    gy = cv2.Sobel(gray_u8, cv2.CV_64F, 0, 1, ksize=3).ravel()
    magnitudes = np.sqrt(gx**2 + gy**2)
    # Histogram of gradient magnitudes
    hist, _ = np.histogram(magnitudes, bins=64, range=(0, magnitudes.max() + 1e-6))
    hist = hist / (hist.sum() + 1e-8)
    ent = float(entropy(hist + 1e-8))   # High entropy → more uniform → suspicious
    kurt = float(kurtosis(magnitudes))  # Low kurtosis → less heavy-tailed → suspicious
    # BUG FIX: scipy's kurtosis() returns NaN for a zero-variance input
    # (e.g. a perfectly flat image has all-zero gradients), and NaN passes
    # through np.clip, poisoning prob_fake downstream. Substitute 0.0 so
    # the kurtosis term falls back to a neutral 0.5 score instead.
    if not np.isfinite(kurt):
        kurt = 0.0
    # Real images: entropy ~3–4, kurtosis >5
    entropy_score = np.clip((ent - 3.5) / 1.5, 0.0, 1.0)
    kurtosis_score = np.clip((5.0 - kurt) / 10.0, 0.0, 1.0)
    return float(0.5 * entropy_score + 0.5 * kurtosis_score)
def _contour_continuity_score(edge_map: np.ndarray) -> float:
    """
    Detect broken / fragmented contours — a hallmark of AI image artifacts.

    Binarizes the edge map, extracts contours, and scores how short and
    how numerous the fragments are relative to the image size.

    Args:
        edge_map : (H, W) edge map in [0, 1].

    Returns:
        Score in [0, 1]; 0.5 (neutral) when no contours are found.
    """
    binary = cv2.threshold((edge_map * 255).astype(np.uint8),
                           30, 255, cv2.THRESH_BINARY)[1]
    contours = cv2.findContours(binary, cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
    if not contours:
        # No edges at all — nothing to judge.
        return 0.5
    avg_length = float(np.mean([cv2.arcLength(c, closed=False) for c in contours]))
    # Many short fragments → suspicious. Normalize the mean fragment
    # length by the image diagonal so the score is resolution-independent.
    h, w = edge_map.shape
    diagonal = float(np.sqrt(h**2 + w**2))
    frag_score = np.clip(1.0 - avg_length / (diagonal * 0.3), 0.0, 1.0)
    count_score = np.clip((len(contours) - 50) / 200.0, 0.0, 1.0)
    return float(0.5 * frag_score + 0.5 * count_score)
# ─────────────────────────────────────────────────────────────────
# Public API
# ─────────────────────────────────────────────────────────────────
def run_edge_branch(img: np.ndarray) -> dict:
    """
    Run the complete Edge Analysis Branch.

    Args:
        img : float32 numpy array (H, W, 3) in [0, 1] — RGB image

    Returns:
        dict with keys:
            "prob_fake"  : float — probability the image is AI-generated
            "confidence" : float — certainty of this branch's estimate
            "edge_map"   : np.ndarray (H, W) float32 — Sobel edge magnitude
    """
    gray = to_grayscale(img)  # (H, W) in [0, 1]
    # Round-trip through to_uint8 on a stacked 3-channel copy, then take
    # one channel back, to get the uint8 grayscale the edge ops expect.
    gray_u8 = to_uint8(np.stack([gray] * 3, axis=-1))[:, :, 0]

    # Edge maps: Sobel for first-order gradients, Laplacian for second-order.
    sobel_map = _sobel_edge_map(gray_u8)
    laplacian_map = _laplacian_edge_map(gray_u8)
    combined_edge = 0.6 * sobel_map + 0.4 * laplacian_map

    # Individual forensic signals.
    signals = [
        _edge_density_score(sobel_map),
        _edge_sharpness_score(laplacian_map),
        _gradient_distribution_score(gray),
        _contour_continuity_score(sobel_map),
    ]

    # Equal-weight ensemble of the four signals.
    prob_fake = float(np.clip(
        0.25 * signals[0] + 0.25 * signals[1] +
        0.25 * signals[2] + 0.25 * signals[3],
        0.0, 1.0,
    ))

    # Confidence from inter-signal agreement: low spread → high confidence.
    confidence = float(np.clip((1.0 - np.std(signals)) * 0.90, 0.1, 0.92))

    return {
        "prob_fake": prob_fake,
        "confidence": confidence,
        "edge_map": combined_edge,  # (H, W) float32 — for visualization
    }