# app.py — Hugging Face Space by Prasanna-ETH ("Update app.py", commit 08913ba, verified)
import os
import gradio as gr
from transformers import pipeline
# Read the Hugging Face access token from the environment (Spaces inject it
# as a secret; locally you must export HF_TOKEN yourself). May be None.
hf_token = os.environ.get("HF_TOKEN")
# Step 2: Load the Video Hunter
# Load the model that has LSTM "Memory" built-in
# Note: trust_remote_code=True is REQUIRED for this custom architecture.
#   SECURITY: this executes Python shipped inside the model repo — only safe
#   because the repo is pinned and trusted; do not point it at arbitrary repos.
# Note: token=hf_token is REQUIRED for this gated repository
video_detector = pipeline(
    "video-classification",
    model="Naman712/Deep-fake-detection",
    trust_remote_code=True,
    token=hf_token
)
# Step 3: The "Temporal" Analysis Logic
def analyze_kyc_video(video_file):
    """Classify a KYC video as deepfake or live human.

    Args:
        video_file: Path to the video supplied by Gradio, or a falsy
            value (None / "") when the user submitted nothing.

    Returns:
        str: A human-readable verdict — deepfake alert, live-human
        success, or an error message. Never raises.
    """
    # Guard clause: Gradio passes None when no video was recorded/uploaded.
    if not video_file:
        return "ERROR: No video provided."
    try:
        # The pipeline scores frame sequences for unnatural movement.
        results = video_detector(video_file)
        # The model returns a list of dicts like [{'label': 'FAKE', 'score': 0.9}],
        # best guess first.
        top_result = results[0]
        label = str(top_result['label']).lower()
        score = top_result['score']
        # NOTE(review): the '"0" in label' branch assumes class index 0 means
        # FAKE for this model — confirm against the model card's id2label map.
        if "fake" in label or "0" in label:
            return f"🚨 ALERT: DEEPFAKE DETECTED!\n\n📊 Fake Confidence: {score:.1%}\n(Spotted AI mask jitters or artificial movement)"
        # It ignores hand-shaking because LSTM sees it as a natural physical movement
        return f"✅ SUCCESS: LIVE HUMAN VERIFIED.\n\n📊 Real Confidence: {score:.1%}\n(Natural movement detected)"
    except Exception as e:
        # UI boundary: report the failure as text instead of crashing the app.
        return f"ERROR analyzing video: {str(e)}"
# Step 4: Build the Demo Interface (Gradio)
# Two tabs share the same analyzer: one for uploaded files, one for a live
# webcam recording. Each tab wires its own Video input and Textbox output.
with gr.Blocks(title="IOB Sentinel: Spatio-Temporal KYC Verifier") as demo:
    gr.Markdown("# IOB Sentinel: Video KYC Deepfake Detector")
    gr.Markdown("Uses ResNext+LSTM to distinguish between natural movement and AI mask jitters.")
    with gr.Tabs():
        # Tab 1: analyze a pre-recorded video file.
        with gr.TabItem("Upload Video"):
            upload_input = gr.Video(label="Upload KYC Video")
            upload_output = gr.Textbox(label="Result")
            upload_btn = gr.Button("Analyze Uploaded Video", variant="primary")
            upload_btn.click(fn=analyze_kyc_video, inputs=upload_input, outputs=upload_output)
        # Tab 2: record and analyze a live webcam clip.
        with gr.TabItem("Live Webcam"):
            webcam_input = gr.Video(sources=["webcam"], label="Record Live Video")
            webcam_output = gr.Textbox(label="Result")
            webcam_btn = gr.Button("Analyze Live Video", variant="primary")
            webcam_btn.click(fn=analyze_kyc_video, inputs=webcam_input, outputs=webcam_output)

# Launch the app only when run as a script (Spaces also executes this path).
if __name__ == "__main__":
    demo.launch()