# NOTE(review): the three lines below were Hugging Face web-page residue
# ("Inam65's picture / Update app.py / 07b113f verified") accidentally
# pasted into the source; kept as comments so the file parses.
# Inam65 — "Update app.py" — commit 07b113f (verified)
import gradio as gr
import numpy as np
from PIL import Image
import cv2
# -----------------------------
# STEP 1: FACE DETECTION + ROUTING (VERSION 2 - FOUNDATION)
# -----------------------------
# This version does NOT yet classify deepfakes.
# It ONLY decides:
# 1) Does the image contain a human face?
# 2) Route it to the correct analysis pipeline
# -----------------------------
# Load OpenCV Haar Cascade (lightweight & HF-friendly).
# cv2.data.haarcascades points at the cascade XML files bundled with the
# opencv wheel, so no extra model file needs to ship with this Space.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
def detect_faces(image_np):
    """
    Detect faces in an image array using the OpenCV Haar cascade.

    Parameters
    ----------
    image_np : numpy.ndarray
        Image pixels. A 2-D array is treated as already-grayscale; a
        3-channel array is treated as RGB; a 4th (alpha) channel is
        dropped before conversion.

    Returns
    -------
    Sequence of (x, y, w, h) face bounding boxes — an empty tuple when
    no face is found. (Note: the boxes themselves, not a count; callers
    use len() on the result.)
    """
    # Normalize to single-channel grayscale, which the cascade requires.
    # cv2.COLOR_RGB2GRAY would raise on 2-D or 4-channel input, so handle
    # those shapes explicitly instead of assuming 3-channel RGB.
    if image_np.ndim == 2:
        gray = image_np
    else:
        if image_np.shape[2] == 4:
            image_np = image_np[:, :, :3]  # drop alpha channel
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
    # Haar parameters: 10% pyramid scale step, at least 5 neighboring
    # detections to accept a window, ignore faces smaller than 60x60 px.
    return face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(60, 60),
    )
def route_image(image):
    """
    Decide which analysis pipeline an uploaded image should be sent to.

    Parameters
    ----------
    image : PIL.Image.Image | None
        The uploaded image; Gradio passes None when nothing was uploaded.

    Returns
    -------
    tuple[str, str]
        (routing decision label, human-readable explanation).
    """
    # Guard: the Analyze button can be clicked with no image uploaded.
    if image is None:
        return "No image provided", "N/A"

    # Force 3-channel RGB so grayscale / palette / RGBA uploads can't
    # break the RGB->GRAY conversion inside detect_faces().
    image_np = np.array(image.convert("RGB"))
    faces = detect_faces(image_np)

    # ROUTING LOGIC: any detected face -> face-specific (deepfake)
    # pipeline; otherwise -> generic AI-image pipeline.
    if len(faces) > 0:
        route = "🧑 Face Detected"
        explanation = (
            "This image contains one or more human faces.\n"
            "→ It will be analyzed using **deepfake face-detection models**\n"
            "(XceptionNet / EfficientNet in next step)."
        )
    else:
        route = "🖼️ No Face Detected"
        explanation = (
            "No human face detected in this image.\n"
            "→ It will be analyzed using **generic AI-image detection models**\n"
            "(GAN / diffusion detection)."
        )
    return route, explanation
# -----------------------------
# GRADIO UI
# -----------------------------
# Two-column layout: image upload + trigger button on the left; the
# routing decision and its explanation (filled by route_image) on the
# right. Mojibake in the user-facing strings has been repaired
# (e.g. "πŸ”" -> "🔍", "βœ…" -> "✅").
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# 🧭 Image Analysis Router (Version 2 – Step 1)
This step determines **how the image should be analyzed**:
- 👀 Face present → Deepfake detection pipeline
- 🖼️ No face → Generic AI-image detection pipeline
*(No deepfake classification is performed yet.)*
""")
    with gr.Row():
        with gr.Column():
            # type="pil" makes Gradio hand route_image a PIL.Image
            # (or None when nothing has been uploaded).
            image_input = gr.Image(type="pil", label="Upload Any Image")
            analyze_btn = gr.Button("Analyze Image 🔍")
        with gr.Column():
            route_output = gr.Textbox(label="Routing Decision")
            explanation_output = gr.Textbox(label="Explanation", lines=5)
    # route_image returns (route, explanation); the order matches the
    # two output components listed here.
    analyze_btn.click(
        fn=route_image,
        inputs=image_input,
        outputs=[route_output, explanation_output],
    )
    gr.Markdown("""
### ✅ Why this step matters
- Prevents misuse of face-only deepfake models
- Reduces false positives
- Makes the system work for **ANY image**
🔜 **Next step:** Integrate XceptionNet / EfficientNet classifiers.
""")

# Launch the Gradio server (blocks until the process is stopped).
demo.launch()