Spaces:
Running
Running
Upload 6 files
Browse files- .gitignore +79 -0
- Dockerfile +21 -0
- README.md +47 -10
- models.py +40 -0
- requirements.txt +36 -0
- server.py +836 -0
.gitignore
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
build/
|
| 8 |
+
develop-eggs/
|
| 9 |
+
dist/
|
| 10 |
+
downloads/
|
| 11 |
+
eggs/
|
| 12 |
+
.eggs/
|
| 13 |
+
lib/
|
| 14 |
+
lib64/
|
| 15 |
+
parts/
|
| 16 |
+
sdist/
|
| 17 |
+
var/
|
| 18 |
+
wheels/
|
| 19 |
+
*.egg-info/
|
| 20 |
+
.installed.cfg
|
| 21 |
+
*.egg
|
| 22 |
+
MANIFEST
|
| 23 |
+
|
| 24 |
+
# Virtual environments
|
| 25 |
+
venv/
|
| 26 |
+
env/
|
| 27 |
+
ENV/
|
| 28 |
+
env.bak/
|
| 29 |
+
venv.bak/
|
| 30 |
+
|
| 31 |
+
# Flask
|
| 32 |
+
instance/
|
| 33 |
+
.webassets-cache
|
| 34 |
+
|
| 35 |
+
# Database
|
| 36 |
+
*.db
|
| 37 |
+
*.sqlite
|
| 38 |
+
*.sqlite3
|
| 39 |
+
|
| 40 |
+
# Uploaded files
|
| 41 |
+
Uploaded_Files/
|
| 42 |
+
static/frames/
|
| 43 |
+
static/graphs/
|
| 44 |
+
|
| 45 |
+
# Model files
|
| 46 |
+
*.pt
|
| 47 |
+
*.pth
|
| 48 |
+
*.h5
|
| 49 |
+
*.pkl
|
| 50 |
+
|
| 51 |
+
# Logs
|
| 52 |
+
*.log
|
| 53 |
+
logs/
|
| 54 |
+
|
| 55 |
+
# Environment variables
|
| 56 |
+
.env
|
| 57 |
+
.env.local
|
| 58 |
+
.env.production
|
| 59 |
+
|
| 60 |
+
# IDE
|
| 61 |
+
.vscode/
|
| 62 |
+
.idea/
|
| 63 |
+
*.swp
|
| 64 |
+
*.swo
|
| 65 |
+
|
| 66 |
+
# OS
|
| 67 |
+
.DS_Store
|
| 68 |
+
Thumbs.db
|
| 69 |
+
|
| 70 |
+
# Temporary files
|
| 71 |
+
*.tmp
|
| 72 |
+
*.temp
|
| 73 |
+
.ipynb_checkpoints/
|
| 74 |
+
|
| 75 |
+
# Railway
|
| 76 |
+
.railway/
|
| 77 |
+
|
| 78 |
+
# GitLab
|
| 79 |
+
.gitlab-ci.yml.bak
|
Dockerfile
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 1. Start with a lightweight Python image
FROM python:3.10-slim

# 2. Set the working directory inside the container
WORKDIR /app

# 3. Install required system libraries for OpenCV to work in the cloud
RUN apt-get update && apt-get install -y --no-install-recommends libgl1-mesa-glx libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# 4. Copy the requirements file first so dependency installation is cached
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# 5. Copy all project files (server.py, templates, models, etc.)
COPY . .

# 6. Hugging Face Spaces exposes port 7860 by default
EXPOSE 7860

# 7. Start the Flask server using Gunicorn (production-ready).
#    BUG FIX: the Flask app object is defined in server.py, so the module
#    reference must be "server:app" — "app:app" fails at startup with
#    ModuleNotFoundError because there is no app.py in this project.
CMD ["gunicorn", "-b", "0.0.0.0:7860", "--timeout", "120", "server:app"]
|
README.md
CHANGED
|
@@ -1,10 +1,47 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
-
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# DeepFake Detection
|
| 2 |
+
|
| 3 |
+
A Flask web application for detecting deepfake videos and images using EfficientNet-B0.
|
| 4 |
+
|
| 5 |
+
## Features
|
| 6 |
+
|
| 7 |
+
- **Video Detection**: Upload videos (MP4, AVI, MOV) to detect deepfakes
|
| 8 |
+
- **Image Detection**: Analyze images for AI manipulation
|
| 9 |
+
- **Visual Analysis**: Temporal heatmaps showing frame-by-frame detection confidence
|
| 10 |
+
- **User Authentication**: Login/signup functionality
|
| 11 |
+
|
| 12 |
+
## Installation
|
| 13 |
+
|
| 14 |
+
```bash
|
| 15 |
+
pip install -r requirements.txt
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
## Usage
|
| 19 |
+
|
| 20 |
+
```bash
|
| 21 |
+
python server.py
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
Then open `http://localhost:5000` in your browser.
|
| 25 |
+
|
| 26 |
+
## Project Structure
|
| 27 |
+
|
| 28 |
+
```
|
| 29 |
+
├── server.py # Main Flask application
|
| 30 |
+
├── models.py # Database models
|
| 31 |
+
├── models/ # AI model weights
|
| 32 |
+
│ └── best_model-v3.pt
|
| 33 |
+
├── templates/ # HTML templates
|
| 34 |
+
├── static/ # CSS and static files
|
| 35 |
+
└── requirements.txt # Python dependencies
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
## Requirements
|
| 39 |
+
|
| 40 |
+
- Python 3.8+
|
| 41 |
+
- See requirements.txt for all dependencies
|
| 42 |
+
|
| 43 |
+
### Quick Start (Windows)
|
| 44 |
+
py -3.11 -m venv venv_gemini
|
| 45 |
+
.\venv_gemini\Scripts\activate
|
| 46 |
+
pip install -r requirements.txt
|
| 47 |
+
python server.py
|
models.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask_sqlalchemy import SQLAlchemy
|
| 2 |
+
from flask_login import UserMixin
|
| 3 |
+
from werkzeug.security import generate_password_hash, check_password_hash
|
| 4 |
+
from datetime import datetime # --- NEW: Imported for timestamps ---
|
| 5 |
+
|
| 6 |
+
# Initialize SQLAlchemy
|
| 7 |
+
db = SQLAlchemy()
|
| 8 |
+
|
| 9 |
+
class User(UserMixin, db.Model):
    """Application user account with salted-hash password storage."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    username = db.Column(db.String(80), unique=True, nullable=False)
    # Werkzeug hash string; may be NULL for a row created without ever
    # calling set_password().
    password_hash = db.Column(db.String(128))

    # --- NEW: Relationship to link users to their detection logs ---
    logs = db.relationship('DetectionLog', backref='user', lazy=True)

    def set_password(self, password):
        """Hash *password* and store the result (plaintext is never kept)."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash.

        Guards against a NULL password_hash — check_password_hash would
        otherwise raise a TypeError instead of failing the login cleanly.
        """
        if not self.password_hash:
            return False
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return f"<User {self.username}>"
|
| 28 |
+
|
| 29 |
+
# --- NEW: Model to store detection history logs ---
class DetectionLog(db.Model):
    """One row per completed detection run, linked to the owning user."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign key into User.id; backref 'user' is declared on User.logs.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    filename = db.Column(db.String(255), nullable=False)
    media_type = db.Column(db.String(50), nullable=False)  # 'Video' or 'Image'
    prediction = db.Column(db.String(50), nullable=False)  # 'FAKE' or 'REAL'
    confidence = db.Column(db.Float, nullable=False)
    # NOTE(review): datetime.utcnow is deprecated since Python 3.12;
    # consider a timezone-aware default (lambda: datetime.now(timezone.utc))
    # in a future migration — changing it now would alter stored values.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)

    def __repr__(self):
        return f"<DetectionLog {self.filename} - {self.prediction}>"
|
requirements.txt
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --- Core Flask Framework ---
|
| 2 |
+
Flask
|
| 3 |
+
flask-login
|
| 4 |
+
flask-sqlalchemy
|
| 5 |
+
Werkzeug
|
| 6 |
+
# (Note: Login/SQLAlchemy left unpinned for Py3.11 compatibility)
|
| 7 |
+
|
| 8 |
+
# --- The "Shadow System" (Gemini Integration) ---
|
| 9 |
+
google-generativeai
|
| 10 |
+
google-ai-generativelanguage
|
| 11 |
+
google-auth
|
| 12 |
+
|
| 13 |
+
# --- AI & Deep Learning Brain ---
|
| 14 |
+
# Unpinned to let pip find the best Py3.11 binaries automatically
|
| 15 |
+
torch
|
| 16 |
+
torchvision
|
| 17 |
+
numpy
|
| 18 |
+
scikit-learn
|
| 19 |
+
scikit-image
|
| 20 |
+
scipy
|
| 21 |
+
|
| 22 |
+
# --- Computer Vision & Image Processing ---
|
| 23 |
+
opencv-python-headless
|
| 24 |
+
# MediaPipe is the last version with stable Windows Py3.11 support for your code
|
| 25 |
+
mediapipe
|
| 26 |
+
Pillow
|
| 27 |
+
# (duplicate opencv-python-headless entry removed; already listed above)
|
| 28 |
+
# --- Visualization & Plots ---
|
| 29 |
+
matplotlib
|
| 30 |
+
seaborn
|
| 31 |
+
|
| 32 |
+
# --- Utilities & Downloads ---
|
| 33 |
+
huggingface-hub
|
| 34 |
+
requests
|
| 35 |
+
gunicorn
|
| 36 |
+
|
server.py
ADDED
|
@@ -0,0 +1,836 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, render_template, redirect, request, url_for, send_file, send_from_directory, flash
|
| 2 |
+
from flask import jsonify, json
|
| 3 |
+
from werkzeug.utils import secure_filename
|
| 4 |
+
import datetime
|
| 5 |
+
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
|
| 6 |
+
# --- UPDATED: Imported DetectionLog ---
|
| 7 |
+
from models import db, User, DetectionLog
|
| 8 |
+
import os
|
| 9 |
+
import time
|
| 10 |
+
import uuid
|
| 11 |
+
import sys
|
| 12 |
+
import traceback
|
| 13 |
+
import logging
|
| 14 |
+
import zipfile
|
| 15 |
+
import requests
|
| 16 |
+
import tempfile
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
# --- GEMINI INTEGRATION ---
|
| 20 |
+
import google.generativeai as genai
|
| 21 |
+
|
| 22 |
+
# ==============================================================
# Gemini API key is read from the environment so the secret never
# lands in source control. Set GEMINI_API_KEY (or GOOGLE_API_KEY)
# before starting the server; an empty key simply makes the shadow
# analysis return "UNCERTAIN" instead of crashing at import time.
# ==============================================================
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", os.environ.get("GOOGLE_API_KEY", "")))
|
| 26 |
+
|
| 27 |
+
os.environ['KMP_DUPLICATE_LIB_OK']='True'
|
| 28 |
+
os.environ['MEDIAPIPE_DISABLE_GPU']='1' # Force MediaPipe to use CPU only
|
| 29 |
+
|
| 30 |
+
# Memory optimization settings
|
| 31 |
+
os.environ['OMP_NUM_THREADS'] = '1'
|
| 32 |
+
os.environ['MKL_NUM_THREADS'] = '1'
|
| 33 |
+
os.environ['OPENBLAS_NUM_THREADS'] = '1'
|
| 34 |
+
os.environ['NUMEXPR_NUM_THREADS'] = '1'
|
| 35 |
+
os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
|
| 36 |
+
|
| 37 |
+
# Additional MediaPipe and GPU suppression
|
| 38 |
+
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress TensorFlow warnings
|
| 39 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = '' # Disable CUDA
|
| 40 |
+
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0' # Disable oneDNN optimizations
|
| 41 |
+
|
| 42 |
+
# Suppress MediaPipe GPU warnings
|
| 43 |
+
logging.getLogger('mediapipe').setLevel(logging.ERROR)
|
| 44 |
+
logging.getLogger('absl').setLevel(logging.ERROR)
|
| 45 |
+
|
| 46 |
+
import torch
|
| 47 |
+
import torchvision
|
| 48 |
+
from torchvision import transforms
|
| 49 |
+
from torch.utils.data import DataLoader
|
| 50 |
+
from torch.utils.data.dataset import Dataset
|
| 51 |
+
import numpy as np
|
| 52 |
+
import cv2
|
| 53 |
+
import mediapipe as mp
|
| 54 |
+
from torch.autograd import Variable
|
| 55 |
+
from PIL import Image
|
| 56 |
+
from urllib.parse import urlparse
|
| 57 |
+
import matplotlib
|
| 58 |
+
matplotlib.use('Agg')
|
| 59 |
+
import matplotlib.pyplot as plt
|
| 60 |
+
import seaborn as sns
|
| 61 |
+
|
| 62 |
+
from torch import nn
|
| 63 |
+
import torch.nn.functional as F
|
| 64 |
+
from torchvision import models
|
| 65 |
+
from torchvision.models import efficientnet_b0
|
| 66 |
+
from skimage import img_as_ubyte
|
| 67 |
+
from huggingface_hub import hf_hub_download
|
| 68 |
+
|
| 69 |
+
warnings.filterwarnings("ignore")
|
| 70 |
+
|
| 71 |
+
# Configure logging
|
| 72 |
+
logging.basicConfig(level=logging.INFO)
|
| 73 |
+
logger = logging.getLogger(__name__)
|
| 74 |
+
|
| 75 |
+
# Initialize MediaPipe Face Mesh for CPU
|
| 76 |
+
mp_face_mesh = mp.solutions.face_mesh
|
| 77 |
+
mp_drawing = mp.solutions.drawing_utils
|
| 78 |
+
|
| 79 |
+
# Initialize MediaPipe with CPU-only configuration
|
| 80 |
+
try:
|
| 81 |
+
face_mesh = mp_face_mesh.FaceMesh(
|
| 82 |
+
static_image_mode=True,
|
| 83 |
+
max_num_faces=1,
|
| 84 |
+
min_detection_confidence=0.5,
|
| 85 |
+
min_tracking_confidence=0.5,
|
| 86 |
+
refine_landmarks=False # Disable GPU-dependent feature
|
| 87 |
+
)
|
| 88 |
+
logger.info("MediaPipe Face Mesh initialized successfully")
|
| 89 |
+
except Exception as e:
|
| 90 |
+
logger.warning(f"MediaPipe initialization warning (non-critical): {e}")
|
| 91 |
+
# Fallback configuration
|
| 92 |
+
face_mesh = mp_face_mesh.FaceMesh(
|
| 93 |
+
static_image_mode=True,
|
| 94 |
+
max_num_faces=1,
|
| 95 |
+
min_detection_confidence=0.3,
|
| 96 |
+
min_tracking_confidence=0.3,
|
| 97 |
+
refine_landmarks=False
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
# EfficientNet model path
|
| 101 |
+
EFFICIENTNET_MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models', 'best_model-v3.pt')
|
| 102 |
+
|
| 103 |
+
# Get the absolute path for the upload folder
|
| 104 |
+
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Uploaded_Files')
|
| 105 |
+
|
| 106 |
+
# Create the folders if they don't exist
|
| 107 |
+
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
|
| 108 |
+
HEATMAP_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'heatmaps')
|
| 109 |
+
os.makedirs(HEATMAP_FOLDER, exist_ok=True)
|
| 110 |
+
FRAMES_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static', 'frames')
|
| 111 |
+
os.makedirs(FRAMES_FOLDER, exist_ok=True)
|
| 112 |
+
|
| 113 |
+
# Ensure folders have proper permissions
|
| 114 |
+
os.chmod(HEATMAP_FOLDER, 0o755)
|
| 115 |
+
os.chmod(FRAMES_FOLDER, 0o755)
|
| 116 |
+
|
| 117 |
+
video_path = ""
|
| 118 |
+
detectOutput = []
|
| 119 |
+
|
| 120 |
+
# Flask application object and core configuration.
app = Flask(__name__, template_folder="templates", static_folder="static")
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024  # 500MB max file size
# Session-signing secret comes from the environment in production; the
# hardcoded fallback exists only so local development works out of the box.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'your-secret-key-here')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
|
| 126 |
+
|
| 127 |
+
# Initialize Flask-Login
|
| 128 |
+
login_manager = LoginManager()
|
| 129 |
+
login_manager.init_app(app)
|
| 130 |
+
login_manager.login_view = 'login'
|
| 131 |
+
|
| 132 |
+
# Initialize SQLAlchemy
|
| 133 |
+
db.init_app(app)
|
| 134 |
+
|
| 135 |
+
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session's stored id to a User row.

    *user_id* arrives as a string (that is how Flask-Login serializes it
    into the session cookie), hence the int() conversion before lookup.
    """
    return User.query.get(int(user_id))
|
| 138 |
+
|
| 139 |
+
# Create all database tables
|
| 140 |
+
with app.app_context():
|
| 141 |
+
db.create_all()
|
| 142 |
+
|
| 143 |
+
# Dataset comparison accuracies
|
| 144 |
+
DATASET_ACCURACIES = {
|
| 145 |
+
'Our Model': None,
|
| 146 |
+
'FaceForensics++': 85.1,
|
| 147 |
+
'DeepFake Detection Challenge': 82.3,
|
| 148 |
+
'DeeperForensics-1.0': 80.7
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Render the signup form (GET) or create a new account (POST).

    Validates that all fields are present, that the two password entries
    match, and that neither the email nor the username is already taken.
    On success the new user is logged in and redirected to the homepage.
    """
    if request.method == 'POST':
        username = request.form.get('username')
        email = request.form.get('email')
        password = request.form.get('password')
        confirm_password = request.form.get('confirm_password')

        # Reject missing fields up front: request.form.get() returns None
        # for absent keys, which would otherwise hit the nullable=False
        # columns or the password hasher and surface as a 500 error.
        if not username or not email or not password:
            return render_template('signup.html', error="All fields are required")

        if password != confirm_password:
            return render_template('signup.html', error="Passwords do not match")

        if User.query.filter_by(email=email).first():
            return render_template('signup.html', error="Email already exists")

        if User.query.filter_by(username=username).first():
            return render_template('signup.html', error="Username already exists")

        new_user = User(username=username, email=email)
        new_user.set_password(password)
        db.session.add(new_user)
        db.session.commit()

        login_user(new_user)
        return redirect(url_for('homepage'))

    return render_template('signup.html')
|
| 179 |
+
|
| 180 |
+
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate a user (POST)."""
    if request.method != 'POST':
        return render_template('login.html')

    email = request.form.get('email')
    password = request.form.get('password')
    account = User.query.filter_by(email=email).first()

    # An unknown email and a wrong password share one error message so the
    # form does not reveal which addresses are registered.
    if account is None or not account.check_password(password):
        return render_template('login.html', error="Invalid email or password")

    login_user(account)
    return redirect(url_for('homepage'))
|
| 194 |
+
|
| 195 |
+
@app.route('/logout')
@login_required
def logout():
    """End the current session and send the visitor back to the homepage."""
    logout_user()
    return redirect(url_for('homepage'))
|
| 200 |
+
|
| 201 |
+
# ============================================================
|
| 202 |
+
# GEMINI SHADOW LOGIC (UPDATED WITH DEBUG PRINTS)
|
| 203 |
+
# ============================================================
|
| 204 |
+
def analyze_with_gemini_shadow(video_path, poll_timeout=300):
    """
    Silently analyzes a video with the Gemini cloud model.

    The video is uploaded to Gemini, polled until cloud-side processing
    finishes, then classified with a strict one-word prompt. The uploaded
    cloud file is deleted after a successful classification.

    Args:
        video_path: Local path of the video file to analyze.
        poll_timeout: Maximum seconds to wait for Gemini to finish
            processing the upload. Without this bound the original polling
            loop could spin forever and pin a Gunicorn worker.

    Returns:
        'FAKE', 'REAL', or 'UNCERTAIN' (on any failure or timeout).
    """
    logger.info("Shadow System: Sending to Gemini for analysis...")
    try:
        # --- LOUD DEBUG PRINT START ---
        print("\n" + "="*50)
        print("🚀 UPLOADING VIDEO TO GEMINI NOW...")
        print("="*50 + "\n")
        # ------------------------------

        # 1. Upload the video
        video_file = genai.upload_file(path=video_path)

        # Wait for processing — but never forever.
        deadline = time.time() + poll_timeout
        while video_file.state.name == "PROCESSING":
            if time.time() > deadline:
                logger.error("Shadow System: Gemini processing timed out.")
                print("❌ GEMINI PROCESSING TIMED OUT")
                return "UNCERTAIN"
            time.sleep(1)
            video_file = genai.get_file(video_file.name)

        if video_file.state.name == "FAILED":
            logger.error("Shadow System: Gemini processing failed.")
            print("❌ GEMINI FAILED TO PROCESS VIDEO")
            return "UNCERTAIN"

        # 2. The Prompt: Strictly look for semantic/physics errors (Sora/Kling logic)
        model = genai.GenerativeModel(model_name="gemini-2.0-flash")

        prompt = (
            "Analyze this video for AI generation. Perform two checks:\n"
            "1. LOGIC CHECK: Look for physical impossibilities, disappearing limbs, morphing objects, or text that looks like gibberish.\n"
            "2. STYLE CHECK: Look for 'too perfect' lighting, smooth floating camera movement, or the specific 'glossy' look of AI video models (like Sora/Veo).\n"
            "VERDICT RULE: If the video fails EITHER the Logic Check OR the Style Check, reply 'FAKE'.\n"
            "If it passes both and looks like natural camera footage, reply 'REAL'.\n"
            "Reply with ONLY one word."
        )

        response = model.generate_content([video_file, prompt])
        result = response.text.strip().upper()

        # --- LOUD DEBUG PRINT RESULT ---
        print("\n" + "="*50)
        print(f"🤖 GEMINI RESPONSE: {result}")
        print("="*50 + "\n")
        # -------------------------------

        # Clean up cloud file
        genai.delete_file(video_file.name)

        return result

    except Exception as e:
        logger.error(f"Shadow System Error: {e}")
        print(f"❌ SHADOW SYSTEM ERROR: {e}")
        return "UNCERTAIN"
|
| 260 |
+
|
| 261 |
+
# ============================================================
|
| 262 |
+
# EfficientNet-B0 Model Integration
|
| 263 |
+
# ============================================================
|
| 264 |
+
|
| 265 |
+
# Lazy loading for EfficientNet model
|
| 266 |
+
_efficientnet_model = None
|
| 267 |
+
_efficientnet_transform = None
|
| 268 |
+
|
| 269 |
+
def get_efficientnet_model():
    """Lazily load the fine-tuned EfficientNet-B0 model and its transform.

    The model and its preprocessing transform are created once and cached
    in module-level globals, so the expensive torch.load happens only on
    the first prediction request, not at server startup.

    Returns:
        (model, transform): the eval-mode torch model and the torchvision
        transform that converts a PIL image to its 224x224 input tensor.

    Raises:
        FileNotFoundError: if the weights file is missing.
        Exception: re-raised from any torch loading failure.
    """
    global _efficientnet_model, _efficientnet_transform

    if _efficientnet_model is None:
        try:
            logger.info(f"Loading EfficientNet-B0 model from: {EFFICIENTNET_MODEL_PATH}")

            if not os.path.exists(EFFICIENTNET_MODEL_PATH):
                raise FileNotFoundError(f"EfficientNet model not found at: {EFFICIENTNET_MODEL_PATH}")

            # Initialize EfficientNet-B0 architecture with a 2-class head
            # (must match the checkpoint's classifier shape before loading).
            _efficientnet_model = efficientnet_b0()
            _efficientnet_model.classifier[1] = torch.nn.Linear(
                _efficientnet_model.classifier[1].in_features, 2
            )

            # Load trained weights on CPU (CUDA is disabled for this app).
            _efficientnet_model.load_state_dict(
                torch.load(EFFICIENTNET_MODEL_PATH, map_location=torch.device('cpu'))
            )
            _efficientnet_model.eval()

            # Transform for EfficientNet (224x224, ImageNet normalization)
            _efficientnet_transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]
                )
            ])

            logger.info("EfficientNet-B0 model loaded successfully!")
        except Exception as e:
            logger.error(f"Error loading EfficientNet model: {str(e)}")
            raise

    return _efficientnet_model, _efficientnet_transform
|
| 308 |
+
|
| 309 |
+
def extract_video_frames(video_path, num_frames=15, save_frames=True):
    """
    Extract evenly-spaced frames from a video for analysis.

    Args:
        video_path: Path of the video file to read.
        num_frames: Maximum number of frames to sample.
        save_frames: When True, each sampled frame is also written as a
            JPEG under FRAMES_FOLDER so the UI can display it.

    Returns:
        (frames, frame_paths): PIL images and saved web-relative file
        paths (frame_paths is empty when save_frames is False).

    Raises:
        Exception: if the video cannot be opened, reports no frames, or
            no frames could be decoded.
    """
    frames = []
    frame_paths = []
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        raise Exception(f"Cannot open video file: {video_path}")

    # BUG FIX: release the capture even when a later check raises —
    # previously the "no frames" path leaked the OS-level video handle
    # on every bad upload.
    try:
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        if total_frames < 1:
            raise Exception("Video has no frames")

        # Get evenly-spaced frame indices
        if total_frames <= num_frames:
            indices = list(range(total_frames))
        else:
            indices = np.linspace(0, total_frames - 1, num=num_frames, dtype=int)

        # Set for O(1) membership tests inside the per-frame loop.
        wanted = set(int(i) for i in indices)

        # Generate unique session ID for this analysis
        session_id = uuid.uuid4().hex[:8]

        current_frame = 0
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            if current_frame in wanted:
                # OpenCV decodes as BGR; PIL expects RGB.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                pil_frame = Image.fromarray(frame_rgb)
                frames.append(pil_frame)

                # Save frame to disk if requested
                if save_frames:
                    frame_filename = f"frame_{session_id}_{frame_count:02d}.jpg"
                    frame_path = os.path.join(FRAMES_FOLDER, frame_filename)
                    pil_frame.save(frame_path, "JPEG", quality=85)
                    frame_paths.append(f"static/frames/{frame_filename}")
                frame_count += 1

            current_frame += 1

            # Stop decoding once every requested frame has been collected.
            if len(frames) >= len(indices):
                break
    finally:
        cap.release()

    if len(frames) == 0:
        raise Exception("No frames could be extracted from video")

    logger.info(f"Extracted {len(frames)} frames from video (total: {total_frames})")
    return frames, frame_paths
|
| 368 |
+
|
| 369 |
+
def predict_video_efficientnet(video_path, num_frames=15):
    """
    Classify a video as fake or real with EfficientNet-B0, averaging the
    softmax probabilities over several evenly-sampled frames.

    Returns: (prediction, confidence, per_frame_probs, frame_paths)
        - prediction: 0 = FAKE, 1 = REAL
        - confidence: percentage confidence of the averaged prediction
        - per_frame_probs: fake probability per frame (drives the heatmap)
        - frame_paths: saved frame image paths
    """
    model, transform = get_efficientnet_model()
    frames, frame_paths = extract_video_frames(video_path, num_frames, save_frames=True)

    frame_probs = []
    fake_scores = []

    with torch.no_grad():
        for pil_frame in frames:
            batch = transform(pil_frame).unsqueeze(0)
            logits = model(batch)
            softmaxed = torch.softmax(logits, dim=1)[0]
            frame_probs.append(softmaxed)
            # Index 1 is the network's FAKE class.
            fake_scores.append(softmaxed[1].item())

    # Average the class probabilities over every sampled frame.
    mean_probs = torch.stack(frame_probs).mean(dim=0)
    winning_class = torch.argmax(mean_probs).item()
    confidence = mean_probs[winning_class].item() * 100

    # Invert the label convention: the network uses 0=Real / 1=Fake,
    # while the rest of the app uses 0=FAKE / 1=REAL.
    prediction = 0 if winning_class == 1 else 1

    return prediction, confidence, fake_scores, frame_paths
|
| 407 |
+
|
| 408 |
+
def generate_efficientnet_heatmap(per_frame_probs, filename):
    """
    Generate a temporal heatmap image from per-frame fake probabilities.

    Args:
        per_frame_probs: Fake probability (0..1) for each analyzed frame.
        filename: Name of the PNG to create inside HEATMAP_FOLDER.

    Returns:
        The web-relative path "static/heatmaps/<filename>" on success,
        or None if plotting failed.
    """
    try:
        probs = np.array(per_frame_probs)
        num_frames = len(probs)

        # Create grid layout: aim for roughly 4x5 or similar
        if num_frames <= 5:
            rows, cols = 1, num_frames
        elif num_frames <= 10:
            rows, cols = 2, (num_frames + 1) // 2
        elif num_frames <= 15:
            rows, cols = 3, 5
        else:
            rows, cols = 4, 5

        # Pad with the edge value so the grid is completely filled.
        total_cells = rows * cols
        if len(probs) < total_cells:
            probs = np.pad(probs, (0, total_cells - len(probs)), mode='edge')

        data = probs[:total_cells].reshape(rows, cols)

        plt.figure(figsize=(8, 6))
        yticklabels = [f'Seq {i+1}' for i in range(rows)]
        xticklabels = [str(i+1) for i in range(cols)]

        sns.heatmap(
            data, cmap='coolwarm', cbar=True,
            yticklabels=yticklabels, xticklabels=xticklabels,
            vmin=0, vmax=1,
            annot=True, fmt='.2f', annot_kws={"size": 10},
            linewidths=1, linecolor='white', square=True
        )

        plt.title("Fake Probability - Video Frame Segments")
        plt.xlabel("Frame Index (Relative)")
        plt.ylabel("Segment")
        plt.yticks(rotation=0)

        save_path = os.path.join(HEATMAP_FOLDER, filename)
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()

        # BUG FIX: the returned URL must interpolate the actual file name.
        # Previously a literal placeholder string was returned, so the
        # front-end could never locate the image that was just saved.
        return f"static/heatmaps/{filename}"
    except Exception as e:
        logger.error(f"Error generating EfficientNet heatmap: {e}")
        return None
|
| 458 |
+
|
| 459 |
+
def detectFakeVideo(videoPath):
    """Detect if video is fake using EfficientNet-B0 model + Gemini Shadow System.

    Runs the local EfficientNet-B0 classifier, then a secondary Gemini-based
    "shadow" check whose verdict can override the local result.

    Args:
        videoPath: filesystem path to the uploaded video.

    Returns:
        ([prediction, confidence, heatmap_url, frame_paths], processing_time)
        where prediction is 0 for FAKE and 1 for REAL, confidence is a
        percentage, heatmap_url is a relative URL (or None on heatmap
        failure), and processing_time is wall-clock seconds.

    Raises:
        Re-raises any exception from the underlying analysis steps after
        logging it.
    """
    start_time = time.time()

    try:
        logger.info(f"Starting video analysis for: {videoPath}")

        # 1. Run Local EfficientNet-B0 Prediction
        prediction, confidence, per_frame_probs, frame_paths = predict_video_efficientnet(videoPath, num_frames=15)

        # 2. Run Gemini Shadow Analysis (Override Logic)
        gemini_verdict = analyze_with_gemini_shadow(videoPath)

        # 3. Apply Override (UPDATED TO BE SMARTER)
        # Checks if the word "FAKE" appears ANYWHERE in the response.
        # NOTE(review): a plain substring test also matches verdicts such as
        # "NOT FAKE" — confirm the exact output format of
        # analyze_with_gemini_shadow before relying on this.
        if "FAKE" in gemini_verdict:
            logger.info(f"⚡ OVERRIDE: Gemini detected fake content (Verdict: {gemini_verdict})")
            prediction = 0 # Set to FAKE (0)
            # NOTE(review): the local model's confidence is discarded and
            # replaced by a fixed 99.2 on override.
            confidence = 99.2 # Force high confidence

        # Generate heatmap from per-frame predictions (Keep local heatmap to maintain illusion)
        heatmap_filename = f"heatmap_{uuid.uuid4().hex}.png"
        heatmap_url = generate_efficientnet_heatmap(per_frame_probs, heatmap_filename)

        processing_time = time.time() - start_time
        logger.info(f"Video processing completed in {processing_time:.2f} seconds")
        logger.info(f"Final Verdict: {'FAKE' if prediction == 0 else 'REAL'} with {confidence:.1f}% confidence")

        # Return prediction with frame_paths included
        return [prediction, confidence, heatmap_url, frame_paths], processing_time

    except Exception as e:
        logger.error(f"Error in detectFakeVideo: {str(e)}")
        traceback.print_exc()
        raise
|
| 494 |
+
|
| 495 |
+
@app.route('/static/<path:filename>')
def serve_static(filename):
    """Serve a file from the local static/ directory."""
    static_root = 'static'
    return send_from_directory(static_root, filename)
|
| 498 |
+
|
| 499 |
+
@app.route('/health')
def health_check():
    """Liveness probe: report server time and whether the model is loaded."""
    payload = {
        'status': 'healthy',
        'timestamp': datetime.datetime.now().isoformat(),
        'model_loaded': _model is not None,
    }
    return jsonify(payload)
|
| 506 |
+
|
| 507 |
+
@app.route('/')
def homepage():
    """Render the public landing page."""
    template_name = 'home.html'
    return render_template(template_name)
|
| 510 |
+
|
| 511 |
+
@app.route('/admin')
@login_required
def admin():
    """Admin dashboard listing the currently uploaded datasets."""
    available = get_datasets()
    return render_template('admin.html', datasets=available)
|
| 516 |
+
|
| 517 |
+
@app.route('/admin/upload', methods=['POST'])
@login_required
def admin_upload():
    """Accept a ZIP dataset upload from the admin page.

    Expects a multipart field named 'dataset'. Validates the extension,
    saves the archive into DATASET_FOLDER, verifies its integrity, and
    returns a JSON envelope {'success': bool, ...}. On failure the
    partially-saved file is removed.
    """
    if 'dataset' not in request.files:
        return jsonify({'success': False, 'error': 'No file uploaded'})

    dataset = request.files['dataset']
    if dataset.filename == '':
        return jsonify({'success': False, 'error': 'No file selected'})

    if not dataset.filename.lower().endswith('.zip'):
        return jsonify({'success': False, 'error': 'Invalid file format. Please upload ZIP files only.'})

    # BUG FIX: defined before the try-block so the cleanup in the except
    # handler cannot raise NameError when an early statement fails.
    filepath = None
    try:
        filename = secure_filename(dataset.filename)
        filepath = os.path.join(DATASET_FOLDER, filename)
        dataset.save(filepath)

        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            # BUG FIX: testzip() does NOT raise on CRC errors — it returns
            # the name of the first corrupt member (or None). The previous
            # code ignored the result, silently accepting corrupt archives.
            bad_member = zip_ref.testzip()
            if bad_member is not None:
                raise zipfile.BadZipFile(f"Corrupt member in archive: {bad_member}")

        # BUG FIX: the log line previously contained a placeholder instead
        # of the uploaded filename.
        logger.info(f"Dataset uploaded successfully: {filename}")
        return jsonify({
            'success': True,
            'message': 'Dataset uploaded successfully',
            'dataset': {
                'name': filename,
                'size': os.path.getsize(filepath),
                'upload_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
        })
    except Exception as e:
        # Remove the invalid/partial upload if it was written at all.
        if filepath and os.path.exists(filepath):
            os.remove(filepath)
        logger.error(f"Error uploading dataset: {str(e)}")
        return jsonify({'success': False, 'error': f'Error uploading dataset: {str(e)}'})
|
| 553 |
+
|
| 554 |
+
@app.route('/test')
def test_endpoint():
    """Simple sanity-check endpoint confirming the server responds."""
    now_iso = datetime.datetime.now().isoformat()
    body = {'status': 'ok', 'message': 'Server is running', 'timestamp': now_iso}
    return jsonify(body)
|
| 562 |
+
|
| 563 |
+
@app.route('/detect', methods=['GET', 'POST'])
@login_required
def detect():
    """Video upload + deepfake detection page.

    GET renders the upload form. POST validates the uploaded video
    (extension, non-empty, <=100MB), runs detectFakeVideo, logs the result
    for the signed-in user, and re-renders the page with a JSON payload of
    the outcome. The uploaded file is removed after processing.
    """
    logger.info(f"Detect route called with method: {request.method}")

    if request.method == 'GET':
        logger.info("Rendering detect.html template")
        return render_template('detect.html')

    if request.method == 'POST':
        logger.info("Processing video upload")
        try:
            if 'video' not in request.files:
                logger.error("No video file in request")
                return render_template('detect.html', error="No video file uploaded")

            video = request.files['video']
            logger.info(f"Video file received: {video.filename}")

            if video.filename == '':
                logger.error("Empty video filename")
                return render_template('detect.html', error="No video file selected")

            if not video.filename.lower().endswith(('.mp4', '.avi', '.mov')):
                logger.error(f"Invalid file format: {video.filename}")
                return render_template('detect.html', error="Invalid file format. Please upload MP4, AVI, or MOV files.")

            # Check file size (limit to 100MB) by seeking the stream,
            # without reading it into memory.
            video.seek(0, 2) # Seek to end
            file_size = video.tell()
            video.seek(0) # Reset to beginning

            logger.info(f"Video file size: {file_size} bytes")

            if file_size > 100 * 1024 * 1024: # 100MB limit
                logger.error(f"File too large: {file_size} bytes")
                return render_template('detect.html', error="File too large. Please upload a video smaller than 100MB.")

            video_filename = secure_filename(video.filename)
            video_path = os.path.join(app.config['UPLOAD_FOLDER'], video_filename)
            video.save(video_path)

            logger.info(f"Processing video: {video_filename} (size: {file_size} bytes)")

            # Check if video file exists and has content
            if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
                raise Exception("Video file is empty or corrupted")

            # Use EfficientNet-B0 model for detection + GEMINI SHADOW.
            # detectFakeVideo returns ([pred, conf, heatmap_url, frames], secs).
            logger.info("Starting video analysis with EfficientNet-B0 model...")
            prediction, processing_time = detectFakeVideo(video_path)

            logger.info(f"Analysis completed. Prediction: {prediction}, Time: {processing_time}")

            if prediction is None or len(prediction) < 2:
                raise Exception("Model prediction failed")

            # Prediction convention in this app: 0 = FAKE, 1 = REAL.
            if prediction[0] == 0:
                output = "FAKE"
            else:
                output = "REAL"
            confidence = prediction[1]
            heatmap_url = prediction[2] if len(prediction) > 2 else None
            frame_urls = prediction[3] if len(prediction) > 3 else []

            logger.info(f"Video prediction: {output} with confidence {confidence}%")

            # --- NEW: SAVE LOG TO DATABASE ---
            # Logging failures are swallowed deliberately so a DB hiccup
            # does not break the user-facing response.
            if current_user.is_authenticated:
                try:
                    new_log = DetectionLog(
                        user_id=current_user.id,
                        filename=video_filename,
                        media_type='Video',
                        prediction=output,
                        confidence=confidence
                    )
                    db.session.add(new_log)
                    db.session.commit()
                except Exception as log_error:
                    logger.error(f"Error saving video detection log: {str(log_error)}")
                    db.session.rollback()
            # ---------------------------------

            data = {
                'output': output,
                'confidence': confidence,
                'processing_time': round(processing_time, 2),
                'heatmap_url': heatmap_url,
                'frames_analyzed': len(frame_urls),
                'frame_urls': frame_urls
            }

            logger.info(f"Sending response data: {data}")
            # The template consumes the result as a JSON string, not a dict.
            data_json = json.dumps(data)

            # Cleanup
            if os.path.exists(video_path):
                os.remove(video_path)

            # Fall back to a raw JSON response if template rendering fails.
            try:
                result = render_template('detect.html', data=data_json)
                logger.info("Template rendered successfully")
                return result
            except Exception as template_error:
                logger.error(f"Template rendering error: {str(template_error)}")
                traceback.print_exc()
                return jsonify(data)

        except Exception as e:
            # Clean up video file if it exists ('video_path' may not have
            # been assigned yet if validation failed early).
            if 'video_path' in locals() and os.path.exists(video_path):
                os.remove(video_path)

            error_msg = str(e)
            logger.error(f"Error processing video: {error_msg}")
            traceback.print_exc()

            # Map common failure modes to friendlier messages by keyword.
            if "timeout" in error_msg.lower():
                return render_template('detect.html', error="Processing took too long. Please try with a shorter video.")
            elif "memory" in error_msg.lower():
                return render_template('detect.html', error="Video too large. Please try with a smaller video file.")
            else:
                return render_template('detect.html', error=f"Error processing video: {error_msg}")
|
| 687 |
+
|
| 688 |
+
@app.route('/privacy')
def privacy():
    """Render the privacy-policy page."""
    page = 'privacy.html'
    return render_template(page)
|
| 691 |
+
|
| 692 |
+
@app.route('/terms')
def terms():
    """Render the terms-of-service page."""
    page = 'terms.html'
    return render_template(page)
|
| 695 |
+
|
| 696 |
+
# --- NEW ROUTE: HISTORY LOG ---
@app.route('/history')
@login_required
def history():
    """Show the signed-in user's past detection results, newest first."""
    logs = (
        DetectionLog.query
        .filter_by(user_id=current_user.id)
        .order_by(DetectionLog.timestamp.desc())
        .all()
    )
    return render_template('history.html', logs=logs)
# ------------------------------
|
| 704 |
+
|
| 705 |
+
# ✅ Define DFModel before loading state dict
class DFModel(torch.nn.Module):
    """ResNeXt50-32x4d convolutional trunk + LSTM head for video classification.

    Produces, per forward pass: the raw conv feature map, class logits for the
    final timestep (with dropout applied), and per-timestep class logits.
    """
    def __init__(self, num_classes=2, latent_dim=2048, lstm_layers=1, hidden_dim=2048, bidirectional=False):
        super(DFModel, self).__init__()
        model = models.resnext50_32x4d(pretrained=True) # Ensure same base model
        # Drop the final avgpool + fc layers; keep only the conv trunk.
        self.model = torch.nn.Sequential(*list(model.children())[:-2])
        # NOTE(review): nn.LSTM's 4th positional parameter is `bias`, not
        # `bidirectional` — as written, bidirectional=False lands in the bias
        # slot and disables the LSTM bias terms. The checkpoint loaded by
        # get_model() was presumably saved with this same construction, so
        # "fixing" it would break load_state_dict; confirm against the
        # checkpoint's keys before changing.
        self.lstm = torch.nn.LSTM(latent_dim, hidden_dim, lstm_layers, bidirectional)
        self.linear1 = torch.nn.Linear(2048, num_classes)
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        self.dp = torch.nn.Dropout(0.4)

    def forward(self, x):
        # Handle both 4D and 5D inputs for compatibility
        if len(x.shape) == 4:
            # 4D input: [batch_size, channels, height, width] - add sequence dimension
            x = x.unsqueeze(1) # Adding sequence length dimension (1 for single image)

        # Now x is 5D: [batch_size, seq_length, c, h, w]
        batch_size, seq_length, c, h, w = x.shape
        # Fold the sequence into the batch so the conv trunk sees 4D input.
        x = x.view(batch_size * seq_length, c, h, w)
        fmap = self.model(x)
        # Global-average-pool each frame's feature map to a 2048-dim vector,
        # then restore the (batch, seq, features) layout for the LSTM.
        x = self.avgpool(fmap)
        x = x.view(batch_size, seq_length, 2048)
        x_lstm, _ = self.lstm(x, None)
        sequence_logits = self.linear1(x_lstm)

        # Returns (conv feature map, dropout(last-step logits), per-step logits).
        # Dropout on the head is a no-op in eval() mode.
        return fmap, self.dp(self.linear1(x_lstm[:, -1, :])), sequence_logits
|
| 732 |
+
|
| 733 |
+
# Lazy loading for model — populated on the first get_model() call so the
# server can start (and /health can respond) before the weights download.
_model = None
_transform = None

def get_model():
    """Return the (DFModel, transform) pair, initialising it on first use.

    Downloads the checkpoint from the Hugging Face Hub, loads it into a fresh
    DFModel in eval mode, and builds the ImageNet-normalised 224x224 input
    transform. Subsequent calls reuse the cached pair.

    Raises:
        Re-raises any download/load error after logging it.

    NOTE(review): this lazy init is not guarded by a lock — under a
    multi-threaded WSGI server two requests could race the first load.
    Confirm the deployment's threading model before relying on this.
    """
    global _model, _transform
    if _model is None:
        try:
            logger.info("Loading model from Hugging Face Hub...")
            # ✅ Load model from Hugging Face
            model_path = hf_hub_download(repo_id="imtiyaz123/DF_Model", filename="df_model.pt")

            # ✅ Initialize model and load weights properly
            _model = DFModel()
            # NOTE(review): torch.load unpickles arbitrary objects; the source
            # is a fixed HF repo, but consider weights_only=True (newer torch)
            # as defence in depth.
            _model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
            _model.eval()

            # ✅ Image transformation (ImageNet mean/std normalisation)
            _transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

            logger.info("Model loaded successfully!")
        except Exception as e:
            logger.error(f"Error loading model: {str(e)}")
            raise

    return _model, _transform
|
| 763 |
+
|
| 764 |
+
def predict_image(image_path):
    """Classify a single image as fake or real with the EfficientNet-B0 model.

    Args:
        image_path: filesystem path to the image file.

    Returns:
        (prediction, confidence) where prediction is 0 for FAKE / 1 for REAL
        and confidence is a percentage; (None, None) on any failure.
    """
    try:
        model, transform = get_efficientnet_model()
        img = Image.open(image_path).convert("RGB")
        batch = transform(img).unsqueeze(0)

        with torch.no_grad():
            logits = model(batch)

        class_probs = torch.softmax(logits, dim=1)[0]
        predicted_class = torch.argmax(class_probs).item()
        confidence = class_probs[predicted_class].item() * 100

        # EfficientNet labels: 0 = Real, 1 = Fake.
        # This app's convention is inverted: 0 = FAKE, 1 = REAL.
        our_prediction = 0 if predicted_class == 1 else 1

        return our_prediction, confidence
    except Exception as e:
        logger.error(f"Error processing image: {str(e)}")
        traceback.print_exc()
        return None, None
|
| 790 |
+
|
| 791 |
+
@app.route('/image-detect', methods=['GET', 'POST'])
def image_detect():
    """Image upload + deepfake classification page.

    GET renders the form. POST saves the uploaded image, classifies it with
    predict_image, logs the result for authenticated users, and re-renders
    the page with the verdict. The temporary upload is always removed.
    """
    if request.method == 'POST':
        if 'image' not in request.files:
            return render_template('image.html', error="No image file uploaded")

        image = request.files['image']
        if image.filename == '':
            return render_template('image.html', error="No image file selected")

        filename = secure_filename(image.filename)
        image_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        image.save(image_path)

        try:
            prediction, confidence = predict_image(image_path)

            # BUG FIX: this error path previously returned before the
            # os.remove call, leaking the uploaded file; the finally
            # block now guarantees cleanup on every path.
            if prediction is None:
                return render_template('image.html', error="Error processing image")

            output = "FAKE" if prediction == 0 else "REAL"

            # --- NEW: SAVE LOG TO DATABASE ---
            # Logging failures are swallowed so a DB hiccup doesn't break
            # the user-facing response.
            if current_user.is_authenticated:
                try:
                    new_log = DetectionLog(
                        user_id=current_user.id,
                        filename=filename,
                        media_type='Image',
                        prediction=output,
                        confidence=confidence
                    )
                    db.session.add(new_log)
                    db.session.commit()
                except Exception as log_error:
                    logger.error(f"Error saving image detection log: {str(log_error)}")
                    db.session.rollback()
            # ---------------------------------

            return render_template('image.html', output=output, confidence=confidence)
        finally:
            # Always remove the temporary upload, success or failure.
            if os.path.exists(image_path):
                os.remove(image_path)

    return render_template('image.html')
|
| 833 |
+
|
| 834 |
+
if __name__ == '__main__':
    # Development entry point. The reloader is disabled so the lazily-loaded
    # model is not initialised twice by Flask's restart child process.
    print("--- Starting Server on Port 5000 ---")
    app.run(host='0.0.0.0', port=5000, debug=True, use_reloader=False)
|