import gradio as gr
# ---- IMPORT BACKENDS ----
from image_backend import predict_image_pil
from audio_backend import predict_audio
# =========================
# IMAGE LOGIC
# =========================
def analyze_image(image):
    """Classify an uploaded image as real or deepfake and format UI fields.

    Args:
        image: PIL image from the Gradio Image component, or None when cleared.

    Returns:
        Tuple of (prediction label, confidence text, risk message, heatmap);
        blank placeholders when no image was provided.
    """
    # Guard clause: nothing uploaded yet.
    if image is None:
        return "", "", "", None

    label, confidence, heatmap = predict_image_pil(image)

    # Risk wording is chosen from confidence tiers for the predicted class.
    if label == "Fake":
        tiers = [
            (90, 'error High likelihood of deepfake'),
            (60, 'warning Possibly deepfake'),
        ]
        fallback = 'help_outline Uncertain deepfake'
    else:
        tiers = [
            (90, 'check_circle Likely real'),
            (60, 'warning Possibly real'),
        ]
        fallback = 'help_outline Uncertain – needs review'

    risk = fallback
    for cutoff, message in tiers:
        if confidence >= cutoff:
            risk = message
            break

    return label, f"{confidence} %", risk, heatmap
# =========================
# AUDIO LOGIC
# =========================
def analyze_audio(audio_path):
    """Classify an uploaded audio clip as real or deepfake and format UI fields.

    Args:
        audio_path: Filesystem path from the Gradio Audio component, or None.

    Returns:
        Tuple of (prediction label, confidence text, risk message, spectrogram
        image); placeholder values when input is missing or invalid.
    """
    # Guard clause: nothing uploaded yet.
    if audio_path is None:
        return (
            "No Input",
            "-",
            'warning Please upload an audio file.',
            None
        )

    label, confidence, spec_img, error = predict_audio(audio_path)

    # Guard clause: the backend rejected the file (decode/format failure, etc.).
    if error is not None:
        return (
            "Invalid Input",
            "-",
            f'error {error}',
            None
        )

    # NOTE: the audio backend reports lowercase labels ("fake"/"real"),
    # unlike the image backend's capitalized "Fake".
    if label == "fake":
        strong = 'error High likelihood of deepfake'
        weak = 'warning Possibly deepfake'
    else:
        strong = 'check_circle Likely real'
        weak = 'warning Possibly real'

    if confidence >= 90:
        risk = strong
    elif confidence >= 60:
        risk = weak
    else:
        risk = 'help_outline Uncertain – needs review'

    return label.capitalize(), f"{confidence} %", risk, spec_img
# =========================
# UI
# =========================
# FIX: the stylesheet is attached to gr.Blocks() instead of launch() —
# Blocks.launch() has no `css` keyword, so `demo.launch(css=...)` raises
# TypeError at startup. (On Gradio 5+, a file path may need `css_paths=`
# instead — TODO confirm the installed Gradio version.)
with gr.Blocks(css="style.css") as demo:
    # Placeholder markdown; the original comment said "Load Material Icons",
    # presumably an icon-font <link> was meant to go here — TODO confirm.
    gr.Markdown("""
""")
    gr.Markdown("# AI Driven Deepfake Detection System")

    with gr.Tabs():
        # =========================
        # HOME TAB
        # =========================
        with gr.Tab("Home"):
            gr.Markdown("""
## Welcome
This system detects AI-generated (deepfake) content in images and audio using
transformer-based deep learning models.
""")
            gr.Markdown("""
### Supported inputs
- Images: JPG, PNG (face-centric images recommended)
- Audio: WAV, MP3, FLAC, M4A, OGG formats (clear speech preferred)
""")
            gr.Markdown("""
### How to use
1. Select a detection mode using the tabs above.
2. Upload an image or audio file.
3. Click **Submit** to start analysis.
4. Review the prediction, confidence score, and risk assessment.
""")
            gr.Markdown("""
### Understanding the results
- **Prediction**: Model decision (Real / Fake)
- **Confidence**: Certainty percentage of the prediction
- **Risk Assessment**:
- High likelihood → strong indication
- Possibly → caution advised
- Uncertain → manual review recommended
""")
            gr.Markdown("""
### Explainability
For images, attention heatmaps highlight the facial regions that influenced
the model’s decision, supporting transparency and forensic analysis.
""")
            gr.Markdown("""
### Data privacy & intended use
Uploaded files are processed temporarily and are not stored.
This system is intended as a decision-support tool and should not be used
as the sole source of verification.
""")

        # =========================
        # IMAGE TAB
        # =========================
        with gr.Tab("Image Deepfake"):
            gr.Markdown("## Deepfake Image Detection")
            with gr.Row():
                with gr.Column(scale=1):
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=280
                    )
                    img_submit = gr.Button("Submit")
                    img_clear = gr.Button("Clear")
                with gr.Column(scale=2):
                    img_pred = gr.Text(label="Prediction")
                    img_conf = gr.Text(label="Confidence")
                    img_risk = gr.HTML(label="Risk Assessment", value="")
                    img_heatmap = gr.Image(
                        label="Explainability Heatmap",
                        height=280
                    )

            # Submit runs detection; Clear resets the input and all outputs.
            img_submit.click(
                analyze_image,
                image_input,
                [img_pred, img_conf, img_risk, img_heatmap]
            )
            img_clear.click(
                lambda: (None, "", "", "", None),
                None,
                [image_input, img_pred, img_conf, img_risk, img_heatmap]
            )

        # =========================
        # AUDIO TAB
        # =========================
        with gr.Tab("Audio Deepfake"):
            gr.Markdown("## Deepfake Audio Detection")
            with gr.Row():
                with gr.Column(scale=1):
                    audio_input = gr.Audio(
                        label="Upload Audio (WAV, MP3, FLAC, M4A, OGG)",
                        type="filepath"
                    )
                    aud_submit = gr.Button("Submit")
                    aud_clear = gr.Button("Clear")
                with gr.Column(scale=2):
                    aud_pred = gr.Text(label="Prediction")
                    aud_conf = gr.Text(label="Confidence")
                    aud_risk = gr.HTML(label="Risk Assessment", value="")
                    aud_spec = gr.Image(
                        label="Audio Spectrogram (Model Input)",
                        height=280,
                        value=None
                    )

            # Submit runs detection; Clear resets the input and all outputs.
            aud_submit.click(
                analyze_audio,
                audio_input,
                [aud_pred, aud_conf, aud_risk, aud_spec]
            )
            aud_clear.click(
                lambda: (None, "", "", "", None),
                None,
                [audio_input, aud_pred, aud_conf, aud_risk, aud_spec]
            )

demo.launch()