|
|
import gradio as gr |
|
|
|
|
|
|
|
|
from image_backend import predict_image_pil |
|
|
from audio_backend import predict_audio |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_image(image):
    """Run deepfake detection on an uploaded image and format UI outputs.

    Parameters
    ----------
    image : PIL.Image.Image | None
        Image from the Gradio component; ``None`` when nothing was uploaded.

    Returns
    -------
    tuple
        ``(label, confidence_text, risk_html, heatmap)``. Empty strings and
        ``None`` heatmap when there is no input.
    """
    # Guard clause: nothing uploaded yet, clear all output fields.
    if image is None:
        return "", "", "", None

    label, confidence, heatmap = predict_image_pil(image)

    def _risk(icon_hi, text_hi, icon_mid, text_mid):
        """Map the confidence score onto a three-tier risk HTML message."""
        if confidence >= 90:
            return f'<span class="material-icons">{icon_hi}</span> {text_hi}'
        if confidence >= 60:
            return f'<span class="material-icons">{icon_mid}</span> {text_mid}'
        # Low confidence always recommends manual review. (Fixes the
        # previously inconsistent "Uncertain deepfake" wording so it matches
        # the real branch and the audio tab.)
        return '<span class="material-icons">help_outline</span> Uncertain – needs review'

    if label == "Fake":
        risk = _risk("error", "High likelihood of deepfake",
                     "warning", "Possibly deepfake")
    else:
        risk = _risk("check_circle", "Likely real",
                     "warning", "Possibly real")

    return label, f"{confidence} %", risk, heatmap
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_audio(audio_path):
    """Classify an uploaded audio file as real or fake and format UI outputs.

    Parameters
    ----------
    audio_path : str | None
        Filesystem path supplied by the Gradio audio component, or ``None``.

    Returns
    -------
    tuple
        ``(prediction_text, confidence_text, risk_html, spectrogram_image)``.
    """
    # No file supplied: prompt the user instead of calling the backend.
    if audio_path is None:
        return (
            "No Input",
            "-",
            '<span class="material-icons">warning</span> Please upload an audio file.',
            None,
        )

    label, confidence, spec_img, error = predict_audio(audio_path)

    # Backend reported a failure (e.g. unreadable or unsupported file).
    if error is not None:
        return (
            "Invalid Input",
            "-",
            f'<span class="material-icons">error</span> {error}',
            None,
        )

    # Confidence tiers as (threshold, icon, message), highest first.
    if label == "fake":
        tiers = [
            (90, "error", "High likelihood of deepfake"),
            (60, "warning", "Possibly deepfake"),
        ]
    else:
        tiers = [
            (90, "check_circle", "Likely real"),
            (60, "warning", "Possibly real"),
        ]

    # Default (below every threshold): recommend manual review.
    risk = '<span class="material-icons">help_outline</span> Uncertain – needs review'
    for threshold, icon, message in tiers:
        if confidence >= threshold:
            risk = f'<span class="material-icons">{icon}</span> {message}'
            break

    return label.capitalize(), f"{confidence} %", risk, spec_img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------------------------
# BUG FIX: the stylesheet used to be passed as demo.launch(css="style.css"),
# but Blocks.launch() has no `css` parameter, so that call raises a TypeError
# at startup. Custom CSS belongs on the gr.Blocks() constructor.
# NOTE(review): on Gradio >= 5 a CSS *file* must be given via
# css_paths=["style.css"] (there `css=` expects a raw CSS string) — confirm
# against the installed Gradio version.
with gr.Blocks(css="style.css") as demo:

    # Load the Material Icons font used by the risk-assessment HTML snippets.
    gr.Markdown("""
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
""")

    gr.Markdown("# AI Driven Deepfake Detection System")

    with gr.Tabs():

        # ------------------------------------------------------------------
        # Home tab: static user-facing documentation only.
        # ------------------------------------------------------------------
        with gr.Tab("Home"):
            gr.Markdown("""
## Welcome

This system detects AI-generated (deepfake) content in images and audio using
transformer-based deep learning models.
""")

            gr.Markdown("""
### Supported inputs
- Images: JPG, PNG (face-centric images recommended)
- Audio: WAV, MP3, FLAC, M4A, OGG formats (clear speech preferred)
""")

            gr.Markdown("""
### How to use
1. Select a detection mode using the tabs above.
2. Upload an image or audio file.
3. Click **Submit** to start analysis.
4. Review the prediction, confidence score, and risk assessment.
""")

            gr.Markdown("""
### Understanding the results
- **Prediction**: Model decision (Real / Fake)
- **Confidence**: Certainty percentage of the prediction
- **Risk Assessment**:
    - High likelihood → strong indication
    - Possibly → caution advised
    - Uncertain → manual review recommended
""")

            gr.Markdown("""
### Explainability
For images, attention heatmaps highlight the facial regions that influenced
the model’s decision, supporting transparency and forensic analysis.
""")

            gr.Markdown("""
### Data privacy & intended use
Uploaded files are processed temporarily and are not stored.
This system is intended as a decision-support tool and should not be used
as the sole source of verification.
""")

        # ------------------------------------------------------------------
        # Image tab: upload -> analyze_image -> prediction + heatmap.
        # ------------------------------------------------------------------
        with gr.Tab("Image Deepfake"):
            gr.Markdown("## Deepfake Image Detection")

            with gr.Row():
                with gr.Column(scale=1):
                    # type="pil" so analyze_image receives a PIL image.
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=280
                    )
                    img_submit = gr.Button("Submit")
                    img_clear = gr.Button("Clear")

                with gr.Column(scale=2):
                    img_pred = gr.Text(label="Prediction")
                    img_conf = gr.Text(label="Confidence")
                    # HTML so the Material Icons markup renders.
                    img_risk = gr.HTML(label="Risk Assessment", value="")
                    img_heatmap = gr.Image(
                        label="Explainability Heatmap",
                        height=280
                    )

            img_submit.click(
                analyze_image,
                image_input,
                [img_pred, img_conf, img_risk, img_heatmap]
            )

            # Reset the input and all four output fields.
            img_clear.click(
                lambda: (None, "", "", "", None),
                None,
                [image_input, img_pred, img_conf, img_risk, img_heatmap]
            )

        # ------------------------------------------------------------------
        # Audio tab: upload -> analyze_audio -> prediction + spectrogram.
        # ------------------------------------------------------------------
        with gr.Tab("Audio Deepfake"):
            gr.Markdown("## Deepfake Audio Detection")

            with gr.Row():
                with gr.Column(scale=1):
                    # type="filepath" so analyze_audio receives a path string.
                    audio_input = gr.Audio(
                        label="Upload Audio (WAV, MP3, FLAC, M4A, OGG)",
                        type="filepath"
                    )
                    aud_submit = gr.Button("Submit")
                    aud_clear = gr.Button("Clear")

                with gr.Column(scale=2):
                    aud_pred = gr.Text(label="Prediction")
                    aud_conf = gr.Text(label="Confidence")
                    aud_risk = gr.HTML(label="Risk Assessment", value="")
                    aud_spec = gr.Image(
                        label="Audio Spectrogram (Model Input)",
                        height=280
                    )

            aud_submit.click(
                analyze_audio,
                audio_input,
                [aud_pred, aud_conf, aud_risk, aud_spec]
            )

            # Reset the input and all four output fields.
            aud_clear.click(
                lambda: (None, "", "", "", None),
                None,
                [audio_input, aud_pred, aud_conf, aud_risk, aud_spec]
            )

demo.launch()