import gradio as gr
# ---- IMPORT BACKENDS ----
from image_backend import predict_image_pil
from audio_backend import predict_audio
# =========================
# =========================
# IMAGE LOGIC (UNCHANGED)
# =========================
def analyze_image(image):
    """Classify an uploaded image as real or deepfake.

    Args:
        image: PIL image from the Gradio ``Image`` component (``type="pil"``).

    Returns:
        Tuple of ``(label, confidence_text, risk, heatmap)`` where ``label``
        is the backend's "Real"/"Fake" decision, ``confidence_text`` is the
        confidence rendered as ``"<value> %"``, ``risk`` is a human-readable
        risk-assessment string, and ``heatmap`` is the explainability image
        produced by the backend.
    """
    label, confidence, heatmap = predict_image_pil(image)

    # Map the (label, confidence band) pair to a risk string. The leading
    # token (e.g. 'error', 'warning') appears to be a Material Icons glyph
    # name consumed by the UI stylesheet — confirm against style.css.
    if label == "Fake":
        if confidence >= 90:
            risk = 'error High likelihood of deepfake'
        elif confidence >= 60:
            risk = 'warning Possibly deepfake'
        else:
            risk = 'help_outline Uncertain deepfake'
    else:
        if confidence >= 90:
            risk = 'check_circle Likely real'
        elif confidence >= 60:
            risk = 'warning Possibly real'
        else:
            risk = 'help_outline Uncertain – needs review'

    return label, f"{confidence} %", risk, heatmap
# =========================
# AUDIO LOGIC (UNCHANGED)
# =========================
def analyze_audio(audio_path):
    """Classify an uploaded audio file as real or deepfake.

    Args:
        audio_path: Filesystem path to the uploaded clip, as supplied by the
            Gradio ``Audio`` component (``type="filepath"``).

    Returns:
        Tuple of ``(label, confidence_text, risk)`` where ``label`` is the
        backend's decision capitalized for display (backend emits lowercase
        "fake"/"real"), ``confidence_text`` is ``"<value> %"``, and ``risk``
        is a human-readable risk-assessment string.
    """
    label, confidence = predict_audio(audio_path)

    # Same risk mapping as the image path, but the audio backend reports
    # lowercase labels, hence the "fake" comparison and capitalize() below.
    if label == "fake":
        if confidence >= 90:
            risk = 'error High likelihood of deepfake'
        elif confidence >= 60:
            risk = 'warning Possibly deepfake'
        else:
            risk = 'help_outline Uncertain – needs review'
    else:
        if confidence >= 90:
            risk = 'check_circle Likely real'
        elif confidence >= 60:
            risk = 'warning Possibly real'
        else:
            risk = 'help_outline Uncertain – needs review'

    return label.capitalize(), f"{confidence} %", risk
# =========================
# UI
# =========================
# Build the Gradio UI: a Home tab with usage documentation plus one tab per
# detection modality (image, audio), each wired to its analyze_* handler.
with gr.Blocks(css="style.css") as demo:
    # Placeholder markdown block originally intended to load Material Icons;
    # kept so the layout is unchanged — TODO: inject the icon font link here.
    gr.Markdown("""
    """)
    gr.Markdown("# AI Driven Deepfake Detection System")

    with gr.Tabs():
        # =========================
        # HOME TAB
        # =========================
        with gr.Tab("Home"):
            gr.Markdown("""
## Welcome
This system detects AI-generated (deepfake) content in images and audio using
transformer-based deep learning models.
""")
            gr.Markdown("""
### Supported inputs
- Images: JPG, PNG (face-centric images recommended)
- Audio: WAV, MP3, FLAC, M4A, OGG formats (clear speech preferred)
""")
            gr.Markdown("""
### How to use
1. Select a detection mode using the tabs above.
2. Upload an image or audio file.
3. Click **Submit** to start analysis.
4. Review the prediction, confidence score, and risk assessment.
""")
            gr.Markdown("""
### Understanding the results
- **Prediction**: Model decision (Real / Fake)
- **Confidence**: Certainty percentage of the prediction
- **Risk Assessment**:
  - High likelihood → strong indication
  - Possibly → caution advised
  - Uncertain → manual review recommended
""")
            gr.Markdown("""
### Explainability
For images, attention heatmaps highlight the facial regions that influenced
the model’s decision, supporting transparency and forensic analysis.
""")
            gr.Markdown("""
### Data privacy & intended use
Uploaded files are processed temporarily and are not stored.
This system is intended as a decision-support tool and should not be used
as the sole source of verification.
""")

        # =========================
        # IMAGE TAB
        # =========================
        with gr.Tab("Image Deepfake"):
            gr.Markdown("## Deepfake Image Detection")
            with gr.Row():
                with gr.Column(scale=1):
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=280
                    )
                    img_submit = gr.Button("Submit")
                    img_clear = gr.Button("Clear")
                with gr.Column(scale=2):
                    img_pred = gr.Text(label="Prediction")
                    img_conf = gr.Text(label="Confidence")
                    img_risk = gr.HTML(label="Risk Assessment")
                    img_heatmap = gr.Image(
                        label="Explainability Heatmap",
                        height=280
                    )

            img_submit.click(
                fn=analyze_image,
                inputs=image_input,
                outputs=[img_pred, img_conf, img_risk, img_heatmap]
            )
            # Bug fix: the heatmap was previously left out of the clear
            # handler, so a stale heatmap remained on screen after "Clear".
            # Also reset the risk HTML to an empty string rather than None.
            img_clear.click(
                fn=lambda: (None, "", "", "", None),
                inputs=None,
                outputs=[image_input, img_pred, img_conf, img_risk, img_heatmap]
            )

        # =========================
        # AUDIO TAB
        # =========================
        with gr.Tab("Audio Deepfake"):
            gr.Markdown("## Deepfake Audio Detection")
            with gr.Row():
                with gr.Column(scale=1):
                    audio_input = gr.Audio(
                        label="Upload Audio (.wav)",
                        type="filepath"
                    )
                    aud_submit = gr.Button("Submit")
                    aud_clear = gr.Button("Clear")
                with gr.Column(scale=2):
                    aud_pred = gr.Text(label="Prediction")
                    aud_conf = gr.Text(label="Confidence")
                    aud_risk = gr.HTML(label="Risk Assessment")

            aud_submit.click(
                fn=analyze_audio,
                inputs=audio_input,
                outputs=[aud_pred, aud_conf, aud_risk]
            )
            # Bug fix: the risk assessment was previously left out of the
            # clear handler, so it stayed populated after "Clear".
            aud_clear.click(
                fn=lambda: (None, "", "", ""),
                inputs=None,
                outputs=[audio_input, aud_pred, aud_conf, aud_risk]
            )

demo.launch()