"""DeepGuard AI — a Gradio app that routes an uploaded file to an
image / audio / video classifier and reports a REAL/FAKE verdict."""

import gradio as gr
import torch
from transformers import pipeline

# Use the first GPU when available; -1 tells transformers to run on CPU.
DEVICE = 0 if torch.cuda.is_available() else -1

print("🚀 DeepGuard AI Running...")

# ================= LOAD MODELS =================
# NOTE(review): no model id is pinned, so transformers falls back to its
# generic default checkpoints (an ImageNet-style image classifier and a
# generic audio classifier). Those are NOT deepfake detectors, so the
# `"fake" in label` check below will essentially never fire. Pin dedicated
# deepfake-detection checkpoints here for real use — TODO confirm choice.
image_model = pipeline("image-classification", device=DEVICE)
audio_model = pipeline("audio-classification", device=DEVICE)


def _format_verdict(result):
    """Turn a classification-pipeline result into a verdict string.

    ``result`` is the list of ``{'label', 'score'}`` dicts a transformers
    classification pipeline returns, best match first. Returns
    ``"FAKE 🔴"``/``"REAL 🟢"`` plus the top score as a percentage.
    """
    top = result[0]
    score = top["score"] * 100
    verdict = "FAKE 🔴" if "fake" in top["label"].lower() else "REAL 🟢"
    return f"{verdict}\nConfidence: {score:.2f}%"


# ================= IMAGE =================
def detect_image(path):
    """Classify the image at *path* and return a REAL/FAKE verdict string."""
    try:
        return _format_verdict(image_model(path))
    except Exception as e:
        # UI boundary: surface the failure as text instead of crashing Gradio.
        return f"Image analysis failed: {e}"


# ================= AUDIO =================
def detect_audio(path):
    """Classify the audio at *path* and return a REAL/FAKE verdict string.

    Uses the same verdict formatting as :func:`detect_image` so the UI
    output is consistent across media types.
    """
    try:
        return _format_verdict(audio_model(path))
    except Exception as e:
        # UI boundary: surface the failure as text instead of crashing Gradio.
        return f"Audio analysis failed: {e}"


# ================= VIDEO =================
def detect_video(path):
    """Placeholder — full video deepfake analysis is too heavy to run here."""
    return "⚠️ Video detection running (basic mode)"


# ================= MAIN DETECT =================
def detect(file):
    """Route an uploaded Gradio file to the right detector by extension.

    *file* is the ``gr.File`` payload (its ``.name`` is the temp-file path),
    or ``None`` when nothing was uploaded.
    """
    if file is None:
        return "Upload a file"
    name = file.name.lower()
    if name.endswith((".jpg", ".jpeg", ".png")):
        return detect_image(file.name)
    if name.endswith((".wav", ".mp3")):
        return detect_audio(file.name)
    if name.endswith((".mp4", ".avi")):
        return detect_video(file.name)
    return "Unsupported file"


# ================= UI =================
with gr.Blocks() as demo:
    gr.Markdown("# 🔍 DeepGuard AI - Full Detection System")
    file_input = gr.File(label="Upload Image / Video / Audio")
    btn = gr.Button("Analyze")
    output = gr.Textbox()
    btn.click(detect, inputs=file_input, outputs=output)

# ================= RUN =================
if __name__ == "__main__":
    demo.launch()