Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| from predictor import predict_video_with_cam | |
def inference(video):
    """Run deepfake detection on *video* and return UI-ready results.

    Args:
        video: Path to the uploaded/selected video file, or None when the
            user has not provided one.

    Returns:
        A tuple of (summary text for the Textbox, list of Grad-CAM images
        for the Gallery). The image list is empty when no video is given
        or no face is found.
    """
    # Guard clauses: nothing to analyze without a video / without faces.
    if video is None:
        return "Please upload or select a video.", []
    cams, final_label, final_conf, faces_detected, total_frames = predict_video_with_cam(video, max_frames=5)
    if faces_detected == 0:
        return "No face detected in any of the frames. Please try a different video.", []

    # Video-level verdict followed by one line per analyzed frame.
    summary_line = f"Video-Level Prediction → {final_label} ({final_conf:.2%})"
    per_frame = []
    for idx, (frame_label, frame_conf, _) in enumerate(cams):
        per_frame.append(f"Frame {idx+1} → {frame_label} ({frame_conf:.2%})")
    result_text = summary_line + "\n" + "\n".join(per_frame)

    # Warn when some sampled frames had no detectable face.
    if faces_detected < total_frames:
        result_text += f"\nOnly {faces_detected} out of {total_frames} frames contained detectable faces."

    heatmaps = [cam_image for _, _, cam_image in cams]
    return result_text, heatmaps
with gr.Blocks() as demo:
    # Centered HTML header: title, usage hint, and model provenance.
    gr.Markdown(
        """
    <div style="text-align: center;">
        <h2>Deepfake Detection App with Grad-CAM</h2>
        <p>Upload your own video or choose one of the examples below to see how the model works.</p>
        <p><strong>Model Info:</strong> EfficientNet-B0 trained on a FaceForensics++ subset. Grad-CAM is used to improve explainability.</p>
    </div>
    """
    )

    # Main I/O row: video in, text summary + Grad-CAM gallery out.
    with gr.Row():
        video_input = gr.Video(label="Upload or select a video")
        prediction_output = gr.Textbox(label="Prediction Summary", lines=8, interactive=False)
        gradcam_output = gr.Gallery(
            label="Grad-CAM Visuals",
            columns=5,
            height="200px",
            object_fit="scale-down",
            show_label=True,
            container=True,
        )

    # Action buttons: run inference, or reset all three components.
    with gr.Row():
        predict_btn = gr.Button("Predict")
        clear_btn = gr.Button("Clear")

    predict_btn.click(fn=inference, inputs=video_input, outputs=[prediction_output, gradcam_output])
    clear_btn.click(fn=lambda: (None, "", []), inputs=[], outputs=[video_input, prediction_output, gradcam_output])

    # Curated example clips (fake / real / face-less edge case).
    gr.Markdown("### Try with Example Videos")
    example_videos = [
        ("videos/fake1.mp4", "Fake Video 1"),
        ("videos/fake2.mp4", "Fake Video 2"),
        ("videos/fake3.mp4", "Fake Video 3"),
        ("videos/real1.mp4", "Real Video 1"),
        ("videos/real2.mp4", "Real Video 2"),
        ("videos/real3.mp4", "Real Video 3"),
        ("videos/no_face.mp4", "No Face Video"),
    ]

    with gr.Row():
        for video_path, caption in example_videos:
            with gr.Column(scale=1, min_width=150):
                gr.Markdown(f"<center><sub>{caption}</sub></center>")
                gr.Video(value=video_path, interactive=False, height=100, show_label=False)
                use_btn = gr.Button("Use this video")
                # Bind the path as a default argument so each button keeps
                # its own clip (avoids the late-binding closure pitfall).
                use_btn.click(fn=lambda p=video_path: p, inputs=[], outputs=video_input)

if __name__ == "__main__":
    demo.launch()