Spaces:
Sleeping
Sleeping
Commit ·
49c4c8c
1
Parent(s): ccaffc1
Initial commit: TrueFrame
Browse files- .gitignore +18 -0
- app.py +232 -0
- audio_detect.py +102 -0
- combined_detect.py +100 -0
- frame_extractor.py +73 -0
- image_detect.py +81 -0
- requirement.txt +6 -0
- setup_model.py +27 -0
- static/avatars/Female1.png +3 -0
- static/avatars/Female2.png +3 -0
- static/avatars/Male1.png +3 -0
- static/avatars/Male2.png +3 -0
- templates/index.html +383 -0
- templates/login.html +394 -0
- templates/register.html +338 -0
- templates/result.html +168 -0
- video_detect.py +64 -0
.gitignore
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Ignore Python cache
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.pyc
|
| 4 |
+
|
| 5 |
+
# Ignore uploaded and processed media
|
| 6 |
+
uploads/
|
| 7 |
+
processed_frames/
|
| 8 |
+
|
| 9 |
+
# Ignore AI Models and large binaries
|
| 10 |
+
local_clip_model/
|
| 11 |
+
*.bin
|
| 12 |
+
*.safetensors
|
| 13 |
+
*.h5
|
| 14 |
+
*.pt
|
| 15 |
+
|
| 16 |
+
# Virtual environment (if you have one)
|
| 17 |
+
venv/
|
| 18 |
+
env/
|
app.py
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import shutil
import certifi
from flask import Flask, request, render_template, redirect, url_for, session, flash
from authlib.integrations.flask_client import OAuth
from flask_pymongo import PyMongo
from werkzeug.security import generate_password_hash, check_password_hash
from frame_extractor import FrameExtractor

# --- IMPORT DETECTORS ---
from video_detect import VideoDeepfakeDetector
from image_detect import ImageDeepfakeDetector
from audio_detect import AudioDeepfakeDetector
from combined_detect import CombinedDeepfakeDetector

app = Flask(__name__)

# SECURITY: secrets are read from the environment first. The literal fallbacks
# keep local development working, but these credentials are committed to the
# repository and MUST be rotated and removed before any production deployment.
app.secret_key = os.environ.get(
    'SECRET_KEY', 'super_secret_key_change_this_for_production'
)

app.config['GOOGLE_CLIENT_ID'] = os.environ.get(
    'GOOGLE_CLIENT_ID',
    "199550854045-o17gcos3v84ca47r3vjuf6h0erqnjtds.apps.googleusercontent.com",
)
app.config['GOOGLE_CLIENT_SECRET'] = os.environ.get(
    'GOOGLE_CLIENT_SECRET',
    "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",
)
app.config["MONGO_URI"] = os.environ.get(
    'MONGO_URI',
    "mongodb+srv://gauravymhatre_db_user:ojvk39Q0CDDCOA3X@gaurav-mhatre.pdgnaul.mongodb.net/deepfake_db?retryWrites=true&w=majority",
)
# Plain-HTTP cookies are acceptable only for local development.
app.config['SESSION_COOKIE_SECURE'] = False
app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'

oauth = OAuth(app)
# NOTE(review): tlsAllowInvalidCertificates disables server-certificate
# validation for MongoDB — acceptable only while debugging TLS issues locally.
mongo = PyMongo(app, tls=True, tlsAllowInvalidCertificates=True)

google = oauth.register(
    name='google',
    client_id=app.config['GOOGLE_CLIENT_ID'],
    client_secret=app.config['GOOGLE_CLIENT_SECRET'],
    server_metadata_url='https://accounts.google.com/.well-known/openid-configuration',
    client_kwargs={'scope': 'openid email profile'},
)

# Start every boot from an empty upload directory.
UPLOAD_FOLDER = 'uploads'
if os.path.exists(UPLOAD_FOLDER):
    shutil.rmtree(UPLOAD_FOLDER)
os.makedirs(UPLOAD_FOLDER)

# ============================
# ⚡ GLOBALLY LOAD AI MODELS
# ============================
# The audio model is pre-loaded eagerly; the heavier video/image models are
# created lazily on first use via get_video_detector()/get_image_detector().
print("⚡ Starting Server & Pre-loading Audio Model...")
audio_detector = AudioDeepfakeDetector()

video_detector = None
image_detector = None
extractor = None
|
| 52 |
+
|
| 53 |
+
def get_video_detector():
    """Return the cached (video detector, frame extractor) pair.

    Both objects are built on the first call only; subsequent calls reuse
    the module-level singletons.
    """
    global video_detector, extractor
    if video_detector is not None:
        return video_detector, extractor
    print("⚡ Loading Video AI Model...")
    video_detector = VideoDeepfakeDetector()
    extractor = FrameExtractor()
    return video_detector, extractor
|
| 60 |
+
|
| 61 |
+
def get_image_detector():
    """Return the module-level image detector, constructing it lazily."""
    global image_detector
    if image_detector is not None:
        return image_detector
    print("⚡ Loading Image AI Model...")
    image_detector = ImageDeepfakeDetector()
    return image_detector
|
| 67 |
+
|
| 68 |
+
# ============================
|
| 69 |
+
# 🔐 AUTHENTICATION ROUTES
|
| 70 |
+
# ============================
|
| 71 |
+
@app.route('/login', methods=['GET', 'POST'])
def login_page():
    """Render the login form and authenticate manual (email/password) users."""
    if session.get('logged_in'):
        return redirect(url_for('index'))

    if request.method == 'POST':
        form = request.form
        email = form.get('email')
        password = form.get('password')

        try:
            account = mongo.db.users.find_one({"email": email})
        except Exception as e:
            return f"❌ Database Connection Error: {e}"

        # Unknown e-mail and wrong password are rejected with the same message.
        if not account or not check_password_hash(account['password'], password):
            flash("Invalid email or password.")
        else:
            session['logged_in'] = True
            session['user_name'] = account['name']
            session['user_email'] = account['email']
            return redirect(url_for('index'))

    return render_template('login.html')
|
| 93 |
+
|
| 94 |
+
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Show the sign-up form and create a new manual account on POST."""
    if request.method != 'POST':
        return render_template('register.html')

    form = request.form
    name = form.get('name')
    email = form.get('email')
    password = form.get('password')

    try:
        # Duplicate e-mails are not allowed; send the user to login instead.
        if mongo.db.users.find_one({"email": email}):
            flash("Email already registered. Please login.")
            return redirect(url_for('login_page'))

        new_user = {
            "name": name,
            "email": email,
            "password": generate_password_hash(password),
            "auth_type": "manual",
        }
        mongo.db.users.insert_one(new_user)
    except Exception as e:
        return f"❌ Database Error: {e}"

    flash("Account created! Please login.")
    return redirect(url_for('login_page'))
|
| 121 |
+
|
| 122 |
+
@app.route('/login/google')
def google_login():
    """Start the Google OAuth flow by redirecting to the consent screen."""
    callback = url_for('authorize', _external=True)
    return google.authorize_redirect(callback)
|
| 126 |
+
|
| 127 |
+
@app.route('/authorize')
def authorize():
    """OAuth callback: exchange the auth code for a token and log the user in.

    Creates a user document on first Google login. Uses ``.get('picture')``
    consistently — Google accounts without a profile photo omit that claim,
    and the original ``user_info['picture']`` raised KeyError for them while
    the session assignment below already used ``.get``.
    """
    try:
        token = google.authorize_access_token()
        user_info = token.get('userinfo')

        existing_user = mongo.db.users.find_one({"email": user_info['email']})

        if not existing_user:
            mongo.db.users.insert_one({
                "name": user_info['name'],
                "email": user_info['email'],
                "picture": user_info.get('picture'),  # claim may be absent
                "auth_type": "google",
                "password": ""
            })

        session['logged_in'] = True
        session['user_email'] = user_info['email']
        session['user_name'] = user_info['name']
        session['profile_pic'] = user_info.get('picture')

        return redirect(url_for('index'))

    except Exception as e:
        return f"Login failed: {e}"
|
| 153 |
+
|
| 154 |
+
@app.route('/logout')
def logout():
    """Clear all session state and return the visitor to the login page."""
    session.clear()
    return redirect(url_for('login_page'))
|
| 158 |
+
|
| 159 |
+
# ============================
|
| 160 |
+
# 🏠 MAIN APP ROUTE
|
| 161 |
+
# ============================
|
| 162 |
+
# ============================
# 🏠 MAIN APP ROUTE
# ============================
@app.route('/', methods=['GET', 'POST'])
def index():
    """Main dashboard: render the upload form (GET) or run a detection (POST).

    The POST branch dispatches on the submitted ``mode`` field:
    'audio', 'image', 'combined' (audio + video), or video-frames-only
    (the default). Uploads are saved under fixed names in UPLOAD_FOLDER,
    so each new upload overwrites the previous one.
    """
    if not session.get('logged_in'): return redirect(url_for('login_page'))
    user_name = session.get('user_name', 'User')

    if request.method == 'POST':
        # Reject submissions with no file part or an empty filename.
        if 'file' not in request.files: return redirect(request.url)
        file = request.files['file']
        mode = request.form.get('mode')

        if file.filename == '': return redirect(request.url)

        if mode == 'audio':
            # Audio-only path: the pre-loaded global audio detector is used.
            filename = "input_audio.mp3"
            file_path = os.path.join(UPLOAD_FOLDER, filename)
            file.save(file_path)

            verdict, confidence = audio_detector.predict(file_path)

            css_class = "fake" if verdict == "DEEPFAKE DETECTED" else "real"
            return render_template('result.html', result=verdict, css_class=css_class, confidence=f"{confidence*100:.1f}", type="Audio Only", extra_info="<p>Audio Analysis Complete</p>")

        elif mode == 'image':
            # Single-image path: lazily load the CLIP-based image detector.
            filename = "input_image.jpg"
            file_path = os.path.join(UPLOAD_FOLDER, filename)
            file.save(file_path)

            detector = get_image_detector()
            verdict, confidence = detector.predict(file_path)

            css_class = "fake" if verdict == "DEEPFAKE DETECTED" else "real"
            return render_template('result.html', result=verdict, css_class=css_class, confidence=f"{confidence*100:.1f}", type="Image", extra_info="<p>Image Analysis Complete</p>")

        # 👇 THE NEW COMBINED ROUTE 👇
        elif mode == 'combined':
            # Combined path: audio track and visual frames analyzed together.
            filename = "input_combined.mp4"
            video_path = os.path.join(UPLOAD_FOLDER, filename)
            file.save(video_path)

            v_detector, v_extractor = get_video_detector()
            a_detector = audio_detector

            combined_detector = CombinedDeepfakeDetector(a_detector, v_detector, v_extractor)
            final_result, details = combined_detector.predict(video_path)

            # extra_info here is an HTML breakdown produced by the detector.
            css_class = "fake" if final_result == "DEEPFAKE DETECTED" else "real"
            return render_template('result.html', result=final_result, css_class=css_class, confidence="N/A", type="Video + Audio Combined", extra_info=details)

        else: # Video Only Mode (Frames)
            filename = "input_video.mp4"
            video_path = os.path.join(UPLOAD_FOLDER, filename)
            file.save(video_path)

            v_detector, v_extractor = get_video_detector()
            image_paths = v_extractor.extract(video_path)

            if not image_paths: return "Error: Could not extract frames."

            # Majority vote over the sampled frames: the video is flagged only
            # if strictly more than 51% of frames are individually flagged.
            fake_votes = 0
            for img_path in image_paths:
                result, _ = v_detector.predict(img_path)
                if result == "DEEPFAKE DETECTED": fake_votes += 1

            final_result = "DEEPFAKE DETECTED" if fake_votes > (len(image_paths) * 0.51) else "REAL"
            css_class = "fake" if final_result == "DEEPFAKE DETECTED" else "real"
            return render_template('result.html', result=final_result, css_class=css_class, confidence="N/A", type="Video (Frames Only)", extra_info=f"<p>Analyzed {len(image_paths)} visual frames.</p>")

    return render_template('index.html', user_name=user_name)
|
| 230 |
+
|
| 231 |
+
if __name__ == '__main__':
    # use_reloader=False avoids loading the AI models twice under the
    # Werkzeug debug reloader.
    app.run(debug=True, port=5000, use_reloader=False)
|
audio_detect.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import librosa
|
| 4 |
+
import soundfile as sf
|
| 5 |
+
import numpy as np
|
| 6 |
+
import subprocess
|
| 7 |
+
import traceback
|
| 8 |
+
import imageio_ffmpeg # ⚡ NEW: Added FFmpeg engine
|
| 9 |
+
from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
|
| 10 |
+
|
| 11 |
+
class AudioDeepfakeDetector:
    """Deepfake detector for audio clips.

    Wraps a pretrained HuggingFace audio-classification model. Incoming files
    are first normalized (first 4 s, mono, 16 kHz WAV) with the bundled FFmpeg
    binary so no resampling work happens in Python.
    """

    def __init__(self):
        self.model_name = "Hemgg/Deepfake-audio-detection"
        self.model = None
        self.extractor = None
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Limit CPU threads to prevent bottlenecking the web server.
        if self.device == "cpu":
            torch.set_num_threads(4)

        print(f"⚡ Loading Audio AI Model: {self.model_name}...")

        try:
            self.extractor = AutoFeatureExtractor.from_pretrained(self.model_name)
            self.model = AutoModelForAudioClassification.from_pretrained(self.model_name).to(self.device)
            self.model.eval()

            print(f" ℹ️ Labels: {self.model.config.id2label}")
            print("✅ Audio Model Loaded Successfully.")

        except Exception as e:
            # Leave self.model = None so predict() reports the failure cleanly.
            print(f"❌ Failed to load Audio Model: {e}")
            traceback.print_exc()

    def predict(self, audio_path):
        """Classify *audio_path* and return ``(verdict, confidence)``.

        verdict is "DEEPFAKE DETECTED", "REAL", or an error string;
        confidence is the softmax probability of the winning class
        (0.0 on any error).
        """
        if not self.model:
            return "ERROR: MODEL NOT LOADED", 0.0

        temp_wav = "uploads/temp_fast_audio.wav"

        try:
            print(f"🔍 Analyzing audio: {audio_path}")

            # FIX: the temp file's directory may not exist when this module is
            # used standalone — create it instead of crashing in FFmpeg.
            os.makedirs(os.path.dirname(temp_wav), exist_ok=True)

            # ⚡ ULTRA-FAST FFMPEG PRE-PROCESSING
            # Instantly chops the file to 4 seconds, forces mono, sets 16 kHz.
            ffmpeg_exe = imageio_ffmpeg.get_ffmpeg_exe()
            command = [
                ffmpeg_exe,
                "-y",               # Overwrite existing files
                "-i", audio_path,   # Input file (mp3, wav, etc.)
                "-t", "4",          # ⚡ Only grab the first 4 seconds
                "-ac", "1",         # ⚡ Force Mono (1 channel)
                "-ar", "16000",     # ⚡ Force 16000Hz sample rate
                temp_wav            # Output perfectly formatted temp file
            ]

            # FIX: check the exit status rather than only the output file's
            # existence — a stale temp file from a previous run would
            # otherwise mask an FFmpeg failure.
            proc = subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if proc.returncode != 0 or not os.path.exists(temp_wav):
                raise Exception("FFmpeg failed to process audio.")

            # The temp WAV is already mono/16 kHz, so loading is trivial.
            audio, sr = sf.read(temp_wav)

            # Ensure data format matches PyTorch requirements.
            audio = audio.astype(np.float32)

            inputs = self.extractor(audio, sampling_rate=sr, return_tensors="pt", padding=True)
            inputs = {key: val.to(self.device) for key, val in inputs.items()}

            # Fast AI inference (no autograd bookkeeping).
            with torch.inference_mode():
                logits = self.model(**inputs).logits

            probs = torch.nn.functional.softmax(logits, dim=-1)
            confidence, predicted_class_id = torch.max(probs, dim=-1)

            raw_label = self.model.config.id2label[predicted_class_id.item()]

            # Map the model's own label vocabulary onto our binary verdict.
            check_label = raw_label.lower()
            is_fake = "ai" in check_label or "fake" in check_label or "spoof" in check_label

            label = "DEEPFAKE DETECTED" if is_fake else "REAL"
            score = confidence.item()

            print(f"✅ AI Verdict: {raw_label} -> {label} ({score*100:.1f}%)")
            return label, score

        except Exception as e:
            print(f"❌ AUDIO ERROR: {e}")
            traceback.print_exc()
            return "ERROR", 0.0

        finally:
            # FIX: always remove the temp file, including on error paths
            # (the original leaked it whenever an exception fired after
            # FFmpeg but before the explicit remove).
            if os.path.exists(temp_wav):
                try:
                    os.remove(temp_wav)
                except OSError:
                    pass
|
| 100 |
+
|
| 101 |
+
if __name__ == "__main__":
    # Smoke test: verify the model downloads/loads (no sample is analyzed).
    detector = AudioDeepfakeDetector()
|
combined_detect.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import subprocess
|
| 3 |
+
import concurrent.futures
|
| 4 |
+
import imageio_ffmpeg # This automatically installed when you installed moviepy
|
| 5 |
+
|
| 6 |
+
class CombinedDeepfakeDetector:
    """Runs audio and visual deepfake detection on one video in parallel.

    Composes three injected collaborators: an audio detector, a per-frame
    video detector, and a frame extractor. The final verdict is "DEEPFAKE
    DETECTED" if EITHER the audio track OR the visual frames are flagged.
    """

    def __init__(self, audio_detector, video_detector, extractor):
        # audio_detector: exposes predict(wav_path) -> (verdict, confidence)
        # video_detector: exposes predict(image_path) -> (verdict, confidence)
        # extractor: exposes extract(video_path) -> list of frame image paths
        self.audio_detector = audio_detector
        self.video_detector = video_detector
        self.extractor = extractor

    def analyze_audio(self, video_path):
        """Rip the first 4 s of audio via FFmpeg and classify it.

        Returns the audio verdict string, "NO AUDIO FOUND" when FFmpeg
        produced no output (e.g. a silent video), or "ERROR" on failure.
        """
        temp_audio_path = "uploads/temp_combined_audio.wav"
        try:
            # Remove any stale temp file from a previous run first.
            if os.path.exists(temp_audio_path):
                os.remove(temp_audio_path)

            # ⚡ ULTRA-FAST AUDIO EXTRACTION
            # We command FFmpeg to instantly rip just the first 4 seconds
            # and format it perfectly for the AI model natively.
            ffmpeg_exe = imageio_ffmpeg.get_ffmpeg_exe()
            command = [
                ffmpeg_exe,
                "-y",                    # Overwrite existing files
                "-i", video_path,        # Input video
                "-t", "4",               # ⚡ STOP reading after exactly 4 seconds
                "-vn",                   # ⚡ Ignore video stream completely
                "-acodec", "pcm_s16le",  # Format as standard WAV
                "-ar", "16000",          # ⚡ Pre-resample to 16kHz so Python doesn't have to
                temp_audio_path
            ]

            # Run the command silently; success is judged by whether the
            # output file exists afterwards.
            subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

            if os.path.exists(temp_audio_path):
                verdict, _ = self.audio_detector.predict(temp_audio_path)
                os.remove(temp_audio_path)
                return verdict
            else:
                return "NO AUDIO FOUND"
        except Exception as e:
            print(f"❌ Fast Audio Extraction Error: {e}")
            return "ERROR"

    def analyze_video(self, video_path):
        """Majority-vote over a sample of extracted frames with early exit.

        Returns "DEEPFAKE DETECTED", "REAL", or "ERROR" (no frames).
        """
        image_paths = self.extractor.extract(video_path)
        if not image_paths:
            return "ERROR"

        # ⚡ OPTIMIZATION 1: SMART SAMPLING — cap the work at 10 frames,
        # spread evenly across the extracted set.
        max_frames_to_check = 10
        if len(image_paths) > max_frames_to_check:
            step = len(image_paths) // max_frames_to_check
            image_paths = image_paths[::step][:max_frames_to_check]

        total_frames = len(image_paths)
        fake_votes = 0
        # Strict-majority threshold: a verdict needs > 51% of frame votes.
        threshold = total_frames * 0.51

        for i, img_path in enumerate(image_paths):
            result, _ = self.video_detector.predict(img_path)
            if result == "DEEPFAKE DETECTED":
                fake_votes += 1

            # ⚡ OPTIMIZATION 2 & 3: EARLY STOPPING — stop as soon as the
            # outcome is mathematically decided in either direction.
            if fake_votes > threshold:
                return "DEEPFAKE DETECTED"

            remaining_frames = total_frames - (i + 1)
            if (fake_votes + remaining_frames) < threshold:
                return "REAL"

        # Fallback for the case where the loop completes without an early
        # exit (votes landed exactly on the boundary).
        return "DEEPFAKE DETECTED" if fake_votes > threshold else "REAL"

    def predict(self, video_path):
        """Run audio and video analysis concurrently and merge the verdicts.

        Returns ``(final_verdict, details_html)`` where details_html is a
        ready-to-render breakdown of the two sub-verdicts.
        """
        print(f"🔍 Starting ULTRA-FAST Combined Analysis for: {video_path}")

        # Parallel execution: audio and video run at the exact same time.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            audio_future = executor.submit(self.analyze_audio, video_path)
            video_future = executor.submit(self.analyze_video, video_path)

            audio_verdict = audio_future.result()
            video_verdict = video_future.result()

        # OR-combine: a fake signal from either modality flags the video.
        is_fake = (audio_verdict == "DEEPFAKE DETECTED") or (video_verdict == "DEEPFAKE DETECTED")
        final_verdict = "DEEPFAKE DETECTED" if is_fake else "REAL"

        details = f"""
        <div style='text-align: left; background: #f8f9fa; padding: 15px; border-radius: 8px; margin-top: 15px; color: #333; border: 1px solid #ddd;'>
            <h4 style='margin-top:0; margin-bottom: 10px;'>Combined Analysis Breakdown:</h4>
            <ul style='list-style-type: none; padding-left: 0; margin-bottom: 0;'>
                <li style='margin-bottom: 5px;'>🎤 <b>Background Audio:</b> {audio_verdict}</li>
                <li>🎞️ <b>Visual Frames:</b> {video_verdict} (Fast Scan)</li>
            </ul>
        </div>
        """

        return final_verdict, details
|
frame_extractor.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import os
|
| 3 |
+
import shutil
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
class FrameExtractor:
    """Extracts a bounded, evenly spaced sample of frames from a video file."""

    def extract(self, video_path):
        """
        Crash-Proof Extraction Logic:
        - Short Videos (< 25s): Extract 5 frames.
        - Long Videos: Extract evenly spaced frames, but NEVER more than 30.

        Frames are written as JPEGs into a sibling ``<name>_frames`` folder
        (recreated from scratch on every call). Returns the list of saved
        file paths, or an empty list on failure.
        """
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print(f"Error: Could not open video {video_path}")
            return []

        try:
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)

            if total_frames <= 0 or fps <= 0:
                print("Error: Video file seems corrupted or empty.")
                return []

            duration = total_frames / fps
            print(f"🎬 Video Duration: {duration:.1f}s ({total_frames} frames)")

            # --- SMART SELECTION LOGIC ---
            MAX_FRAMES = 30  # Safety limit (prevents crashes)

            count = 5 if duration < 25 else MAX_FRAMES
            # FIX: never sample more indices than the video has frames —
            # np.linspace would otherwise emit duplicate indices and the
            # same frame would be analyzed multiple times.
            count = min(count, total_frames)

            # Select 'count' frame indices spread evenly from start to end.
            target_indices = np.linspace(0, total_frames - 1, count, dtype=int).tolist()

            # --- EXTRACTION LOOP ---
            extracted_files = []

            base_name = os.path.splitext(os.path.basename(video_path))[0]
            output_dir = os.path.join(os.path.dirname(video_path), base_name + "_frames")

            # Recreate the output folder so old frames never leak into a run.
            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            os.makedirs(output_dir)

            saved_count = 0

            for frame_idx in target_indices:
                # Seek directly to the target frame instead of decoding all
                # intermediate frames.
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
                success, frame = cap.read()

                if success:
                    output_path = os.path.join(output_dir, f"frame_{saved_count}.jpg")
                    try:
                        cv2.imwrite(output_path, frame)
                        extracted_files.append(output_path)
                        saved_count += 1
                    except Exception as e:
                        print(f"Warning: Failed to save frame {saved_count}: {e}")

            print(f"✅ Extraction Complete. Analyzed {len(extracted_files)} frames.")
            return extracted_files
        finally:
            # FIX: release the capture handle on every exit path — the
            # original skipped release() on the early returns above.
            cap.release()
|
image_detect.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 3 |
+
from PIL import Image
|
| 4 |
+
|
| 5 |
+
class ImageDeepfakeDetector:
    """Zero-shot image deepfake detector built on a locally cached CLIP model.

    Rather than a trained classifier, this scores the image against two
    groups of text prompts — "camera physics" (real) vs "generative
    patterns" (fake) — and compares the summed probabilities.
    """

    def __init__(self, local_model_path="./local_clip_model"):
        # local_model_path: directory produced by setup_model.py.
        print("⚡ Loading Image AI Model...")
        try:
            self.model = CLIPModel.from_pretrained(local_model_path)
            self.processor = CLIPProcessor.from_pretrained(local_model_path)
            print("✅ Image Model Ready.")
        except Exception as e:
            # NOTE(review): exit() here kills the whole process (including a
            # Flask server importing this lazily) when the cache is missing.
            print(f"❌ Error: {e}")
            print("Run 'setup_model.py' first.")
            exit()

    def predict(self, image_path):
        """Classify *image_path* and return ``(verdict, score)``.

        verdict is "DEEPFAKE DETECTED", "REAL", or "ERROR"; score is the
        summed probability mass of the winning prompt group (0.0 on error).
        """
        try:
            image = Image.open(image_path)

            # --- THE "PHYSICS" FIX ---
            # We are now detecting "Camera Physics" vs "Generative Patterns".
            # IMPORTANT: the scoring below depends on this exact ordering —
            # first 5 prompts are REAL cues, last 6 are FAKE cues.
            labels = [
                # === REAL (Camera Physics) ===
                # Real cameras leave specific traces: noise, focus fall-off, organic skin.
                "photo taken with a canon or nikon dslr camera",          # Specific camera brand hints
                "real human skin with natural pores and imperfections",   # Texture check
                "authentic photograph with natural lighting shadows",     # Lighting physics
                "candid photo from a smartphone camera",                  # Lower quality real
                "high resolution raw photography",                        # HD Real

                # === FAKE (Generative Patterns) ===
                # AI generators leave specific traces: overly smooth, distorted backgrounds.
                "ai generated image by midjourney",                       # Specific Generator
                "stable diffusion synthetic image",                       # Specific Generator
                "deepfake face swap with artifacts",                      # Manipulation
                "computer generated 3d character render",                 # CGI look
                "perfectly smooth plastic skin texture",                  # The "AI Glow"
                "unnatural distorted background details"                  # Background check
            ]

            inputs = self.processor(
                text=labels,
                images=image,
                return_tensors="pt",
                padding=True
            )

            # Inference only — no gradients needed.
            with torch.no_grad():
                outputs = self.model(**inputs)

            # Softmax over the 11 prompts gives a probability per label.
            probs = outputs.logits_per_image.softmax(dim=1)
            scores = probs.tolist()[0]

            # --- SCORING ---
            # Real = indices 0 to 4 (5 labels)
            real_score = sum(scores[:5])

            # Fake = indices 5 to 10 (6 labels)
            fake_score = sum(scores[5:])

            # --- "UNCERTAINTY" LOGIC ---
            # If the scores are very close (e.g., 49% Real vs 51% Fake),
            # it means the AI is guessing. In court, "Innocent until proven
            # guilty". So, if the gap is small (< 5%), we assume it's REAL.
            score_diff = abs(real_score - fake_score)

            if fake_score > real_score:
                # It thinks it's fake, but is it sure?
                if score_diff < 0.05:
                    # Not sure enough -> call it REAL. Note: the returned
                    # real_score is < 0.5 here, which doubles as a low-
                    # confidence signal for callers.
                    return "REAL", real_score
                else:
                    return "DEEPFAKE DETECTED", fake_score
            else:
                return "REAL", real_score

        except Exception as e:
            print(f"Error predicting {image_path}: {e}")
            return "ERROR", 0.0
|
requirement.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
flask-pymongo
authlib
werkzeug
transformers
torch
pillow
opencv-python
numpy
librosa
soundfile
imageio-ffmpeg
certifi
|
setup_model.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 3 |
+
|
| 4 |
+
# Local directory where the CLIP model and processor are cached so the
# project can run fully offline (must match image_detect.py's default).
MODEL_PATH = "./local_clip_model"
|
| 6 |
+
|
| 7 |
+
def download_and_save_model():
    """Download the OpenAI CLIP checkpoint and cache it under MODEL_PATH.

    After this runs once, ImageDeepfakeDetector can load the model fully
    offline from the local directory.
    """
    print("⏳ Downloading OpenAI CLIP model... (This may take a minute)")

    model_name = "openai/clip-vit-base-patch32"

    # Download model and processor from the HuggingFace hub.
    model = CLIPModel.from_pretrained(model_name)
    processor = CLIPProcessor.from_pretrained(model_name)

    # FIX: exist_ok=True replaces the racy check-then-create pattern
    # (`if not os.path.exists(...): os.makedirs(...)`) of the original.
    os.makedirs(MODEL_PATH, exist_ok=True)

    model.save_pretrained(MODEL_PATH)
    processor.save_pretrained(MODEL_PATH)

    print(f"✅ Model successfully saved to '{MODEL_PATH}'")
    print("You can now run the project offline.")
|
| 25 |
+
|
| 26 |
+
if __name__ == "__main__":
    # One-time setup step; run this before starting the Flask app.
    download_and_save_model()
|
static/avatars/Female1.png
ADDED
|
|
Git LFS Details
|
static/avatars/Female2.png
ADDED
|
|
Git LFS Details
|
static/avatars/Male1.png
ADDED
|
|
Git LFS Details
|
static/avatars/Male2.png
ADDED
|
|
Git LFS Details
|
templates/index.html
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>TrueFrame - Dashboard</title>
|
| 7 |
+
<style>
|
| 8 |
+
:root {
|
| 9 |
+
--bg-dark: #0b111e;
|
| 10 |
+
--card-bg: #141c2b;
|
| 11 |
+
--card-hover: #1c273b;
|
| 12 |
+
--primary-cyan: #00e5ff;
|
| 13 |
+
--primary-blue: #0077ff;
|
| 14 |
+
--text-main: #ffffff;
|
| 15 |
+
--text-muted: #8b9bb4;
|
| 16 |
+
--border-color: rgba(255, 255, 255, 0.1);
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
body {
|
| 20 |
+
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 21 |
+
background-color: var(--bg-dark);
|
| 22 |
+
background-image:
|
| 23 |
+
radial-gradient(circle at 20% 20%, rgba(19, 54, 75, 0.945) 0%, transparent 50%),
|
| 24 |
+
radial-gradient(circle at 80% 80%, rgba(13, 42, 70, 0.801) 0%, transparent 50%);
|
| 25 |
+
color: var(--text-main);
|
| 26 |
+
margin: 0;
|
| 27 |
+
min-height: 100vh;
|
| 28 |
+
display: flex;
|
| 29 |
+
flex-direction: column;
|
| 30 |
+
align-items: center;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
/* --- Navbar & Profile Dropdown --- */
|
| 34 |
+
.navbar {
|
| 35 |
+
width: 100%;
|
| 36 |
+
padding: 15px 40px;
|
| 37 |
+
display: flex;
|
| 38 |
+
justify-content: space-between;
|
| 39 |
+
align-items: center;
|
| 40 |
+
box-sizing: border-box;
|
| 41 |
+
background-color: transparent;
|
| 42 |
+
|
| 43 |
+
backdrop-filter: blur(10px);
|
| 44 |
+
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
.brand {
|
| 48 |
+
font-size: 1.5em;
|
| 49 |
+
font-weight: bold;
|
| 50 |
+
color: var(--text-main);
|
| 51 |
+
letter-spacing: 1px;
|
| 52 |
+
display: flex;
|
| 53 |
+
align-items: center;
|
| 54 |
+
gap: 0px;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
.brand span { color: var(--primary-cyan); }
|
| 58 |
+
|
| 59 |
+
.profile-container {
|
| 60 |
+
position: relative;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
.avatar-btn {
|
| 64 |
+
width: 45px;
|
| 65 |
+
height: 45px;
|
| 66 |
+
border-radius: 50%;
|
| 67 |
+
background: linear-gradient(135deg, var(--primary-cyan), var(--primary-blue));
|
| 68 |
+
display: flex;
|
| 69 |
+
justify-content: center;
|
| 70 |
+
align-items: center;
|
| 71 |
+
cursor: pointer;
|
| 72 |
+
border: 2px solid transparent;
|
| 73 |
+
transition: 0.3s;
|
| 74 |
+
color: white;
|
| 75 |
+
font-weight: bold;
|
| 76 |
+
font-size: 1.2em;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
.avatar-btn:hover {
|
| 80 |
+
border-color: var(--primary-cyan);
|
| 81 |
+
box-shadow: 0 0 15px rgba(0, 229, 255, 0.4);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
.dropdown-menu {
|
| 85 |
+
position: absolute;
|
| 86 |
+
top: 60px;
|
| 87 |
+
right: 0;
|
| 88 |
+
background: var(--card-bg);
|
| 89 |
+
border: 1px solid var(--border-color);
|
| 90 |
+
border-radius: 12px;
|
| 91 |
+
width: 250px;
|
| 92 |
+
padding: 15px;
|
| 93 |
+
box-shadow: 0 10px 30px rgba(0,0,0,0.5);
|
| 94 |
+
display: none;
|
| 95 |
+
flex-direction: column;
|
| 96 |
+
opacity: 0;
|
| 97 |
+
transform: translateY(-10px);
|
| 98 |
+
transition: all 0.3s ease;
|
| 99 |
+
z-index: 100;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
.dropdown-menu.active {
|
| 103 |
+
display: flex;
|
| 104 |
+
opacity: 1;
|
| 105 |
+
transform: translateY(0);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
.dropdown-header {
|
| 109 |
+
display: flex;
|
| 110 |
+
align-items: center;
|
| 111 |
+
gap: 15px;
|
| 112 |
+
padding-bottom: 15px;
|
| 113 |
+
border-bottom: 1px solid var(--border-color);
|
| 114 |
+
margin-bottom: 15px;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
.dropdown-header .avatar-lg {
|
| 118 |
+
width: 50px;
|
| 119 |
+
height: 50px;
|
| 120 |
+
border-radius: 50%;
|
| 121 |
+
background: linear-gradient(135deg, var(--primary-cyan), var(--primary-blue));
|
| 122 |
+
display: flex;
|
| 123 |
+
justify-content: center;
|
| 124 |
+
align-items: center;
|
| 125 |
+
font-size: 1.5em;
|
| 126 |
+
font-weight: bold;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
.dropdown-header-info h4 { margin: 0 0 5px 0; font-size: 16px; }
|
| 130 |
+
.dropdown-header-info p { margin: 0; font-size: 12px; color: var(--text-muted); }
|
| 131 |
+
|
| 132 |
+
.logout-btn {
|
| 133 |
+
background: rgba(255, 59, 48, 0.1);
|
| 134 |
+
color: #ff3b30;
|
| 135 |
+
text-decoration: none;
|
| 136 |
+
padding: 10px;
|
| 137 |
+
border-radius: 8px;
|
| 138 |
+
text-align: center;
|
| 139 |
+
font-weight: bold;
|
| 140 |
+
transition: 0.3s;
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
.logout-btn:hover { background: #ff3b30; color: white; }
|
| 144 |
+
|
| 145 |
+
/* --- Main Content --- */
|
| 146 |
+
.main-container {
|
| 147 |
+
margin-top: 50px;
|
| 148 |
+
width: 100%;
|
| 149 |
+
max-width: 800px;
|
| 150 |
+
padding: 20px;
|
| 151 |
+
box-sizing: border-box;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
.header-titles { text-align: center; margin-bottom: 40px; }
|
| 155 |
+
.header-titles h1 { font-size: 2.5em; margin: 0 0 10px 0; }
|
| 156 |
+
.header-titles p { color: var(--text-muted); font-size: 1.1em; margin: 0; }
|
| 157 |
+
|
| 158 |
+
/* --- File Upload Area --- */
|
| 159 |
+
.upload-area {
|
| 160 |
+
background: var(--card-bg);
|
| 161 |
+
border: 2px dashed var(--text-muted);
|
| 162 |
+
border-radius: 16px;
|
| 163 |
+
padding: 40px 20px;
|
| 164 |
+
text-align: center;
|
| 165 |
+
cursor: pointer;
|
| 166 |
+
transition: all 0.3s;
|
| 167 |
+
margin-bottom: 30px;
|
| 168 |
+
position: relative;
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
.upload-area:hover {
|
| 172 |
+
border-color: var(--primary-cyan);
|
| 173 |
+
background: var(--card-hover);
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
.upload-area input[type="file"] {
|
| 177 |
+
position: absolute; width: 100%; height: 100%; top: 0; left: 0;
|
| 178 |
+
opacity: 0; cursor: pointer;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
.upload-icon { font-size: 3em; margin-bottom: 10px; display: block; }
|
| 182 |
+
.upload-text { font-size: 1.2em; color: var(--text-main); font-weight: 500; }
|
| 183 |
+
.upload-subtext { display: block; color: var(--text-muted); margin-top: 5px; font-size: 0.9em; }
|
| 184 |
+
|
| 185 |
+
#fileName { margin-top: 15px; color: var(--primary-cyan); font-weight: bold; }
|
| 186 |
+
|
| 187 |
+
/* --- 4 Feature Cards --- */
|
| 188 |
+
.cards-grid {
|
| 189 |
+
display: grid;
|
| 190 |
+
grid-template-columns: repeat(2, 1fr);
|
| 191 |
+
gap: 20px;
|
| 192 |
+
margin-bottom: 40px;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
.feature-card-label { cursor: pointer; display: block; }
|
| 196 |
+
|
| 197 |
+
.feature-card-label input[type="radio"] { display: none; }
|
| 198 |
+
|
| 199 |
+
.feature-card {
|
| 200 |
+
background: var(--card-bg);
|
| 201 |
+
border: 2px solid var(--border-color);
|
| 202 |
+
border-radius: 16px;
|
| 203 |
+
padding: 25px 20px;
|
| 204 |
+
display: flex;
|
| 205 |
+
align-items: center;
|
| 206 |
+
gap: 15px;
|
| 207 |
+
transition: all 0.3s;
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
.feature-card:hover { transform: translateY(-3px); border-color: rgba(0, 229, 255, 0.4); }
|
| 211 |
+
|
| 212 |
+
.card-icon {
|
| 213 |
+
font-size: 2em;
|
| 214 |
+
background: rgba(255,255,255,0.05);
|
| 215 |
+
width: 60px; height: 60px;
|
| 216 |
+
display: flex; justify-content: center; align-items: center;
|
| 217 |
+
border-radius: 12px;
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
.card-info h3 { margin: 0 0 5px 0; font-size: 1.2em; }
|
| 221 |
+
.card-info p { margin: 0; color: var(--text-muted); font-size: 0.9em; }
|
| 222 |
+
|
| 223 |
+
/* Magic happens here: When radio is checked, style the card */
|
| 224 |
+
.feature-card-label input[type="radio"]:checked + .feature-card {
|
| 225 |
+
border-color: var(--primary-cyan);
|
| 226 |
+
background: linear-gradient(to bottom right, var(--card-bg), rgba(0, 229, 255, 0.05));
|
| 227 |
+
box-shadow: 0 0 20px rgba(0, 229, 255, 0.15);
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
.feature-card-label input[type="radio"]:checked + .feature-card .card-icon {
|
| 231 |
+
background: rgba(0, 229, 255, 0.1);
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
/* --- Submit Button --- */
|
| 235 |
+
.submit-btn {
|
| 236 |
+
background: linear-gradient(90deg, var(--primary-cyan), var(--primary-blue));
|
| 237 |
+
color: white;
|
| 238 |
+
border: none;
|
| 239 |
+
padding: 16px 40px;
|
| 240 |
+
border-radius: 30px;
|
| 241 |
+
font-size: 1.2em;
|
| 242 |
+
font-weight: bold;
|
| 243 |
+
cursor: pointer;
|
| 244 |
+
width: 100%;
|
| 245 |
+
transition: 0.3s;
|
| 246 |
+
box-shadow: 0 8px 20px rgba(0, 119, 255, 0.3);
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
.submit-btn:hover {
|
| 250 |
+
transform: scale(1.02);
|
| 251 |
+
box-shadow: 0 10px 25px rgba(0, 229, 255, 0.5);
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
/* Responsive */
|
| 255 |
+
@media (max-width: 600px) {
|
| 256 |
+
.cards-grid { grid-template-columns: 1fr; }
|
| 257 |
+
.navbar { padding: 15px 20px; }
|
| 258 |
+
}
|
| 259 |
+
</style>
|
| 260 |
+
</head>
|
| 261 |
+
<body>
|
| 262 |
+
|
| 263 |
+
<nav class="navbar">
|
| 264 |
+
<div class="brand">True<span>Frame</span></div>
|
| 265 |
+
|
| 266 |
+
<div class="profile-container">
|
| 267 |
+
<div class="avatar-btn" onclick="toggleDropdown()" id="userInitial">U</div>
|
| 268 |
+
|
| 269 |
+
<div class="dropdown-menu" id="profileDropdown">
|
| 270 |
+
<div class="dropdown-header">
|
| 271 |
+
<div class="avatar-lg" id="dropdownInitial">U</div>
|
| 272 |
+
<div class="dropdown-header-info">
|
| 273 |
+
<h4>{{ user_name }}</h4>
|
| 274 |
+
<p>Authenticated User</p>
|
| 275 |
+
</div>
|
| 276 |
+
</div>
|
| 277 |
+
<a href="/logout" class="logout-btn">Log Out</a>
|
| 278 |
+
</div>
|
| 279 |
+
</div>
|
| 280 |
+
</nav>
|
| 281 |
+
|
| 282 |
+
<div class="main-container">
|
| 283 |
+
|
| 284 |
+
<div class="header-titles">
|
| 285 |
+
<h1>Authenticate Media.</h1>
|
| 286 |
+
<p>Upload your file and select a detection model.</p>
|
| 287 |
+
</div>
|
| 288 |
+
|
| 289 |
+
<form method="POST" enctype="multipart/form-data">
|
| 290 |
+
|
| 291 |
+
<div class="upload-area">
|
| 292 |
+
<input type="file" name="file" required id="fileInput">
|
| 293 |
+
<span class="upload-icon">📂</span>
|
| 294 |
+
<span class="upload-text">Click or drag file to upload</span>
|
| 295 |
+
<span class="upload-subtext">Supports MP4, AVI, MP3, WAV, JPG, PNG</span>
|
| 296 |
+
<div id="fileName"></div>
|
| 297 |
+
</div>
|
| 298 |
+
|
| 299 |
+
<div class="cards-grid">
|
| 300 |
+
|
| 301 |
+
<label class="feature-card-label">
|
| 302 |
+
<input type="radio" name="mode" value="video" checked>
|
| 303 |
+
<div class="feature-card">
|
| 304 |
+
<div class="card-icon">🎞️</div>
|
| 305 |
+
<div class="card-info">
|
| 306 |
+
<h3>Video Scan</h3>
|
| 307 |
+
<p>Analyzes visual frames for manipulation</p>
|
| 308 |
+
</div>
|
| 309 |
+
</div>
|
| 310 |
+
</label>
|
| 311 |
+
|
| 312 |
+
<label class="feature-card-label">
|
| 313 |
+
<input type="radio" name="mode" value="image">
|
| 314 |
+
<div class="feature-card">
|
| 315 |
+
<div class="card-icon">🖼️</div>
|
| 316 |
+
<div class="card-info">
|
| 317 |
+
<h3>Image Scan</h3>
|
| 318 |
+
<p>Detects AI generated photos and faces</p>
|
| 319 |
+
</div>
|
| 320 |
+
</div>
|
| 321 |
+
</label>
|
| 322 |
+
|
| 323 |
+
<label class="feature-card-label">
|
| 324 |
+
<input type="radio" name="mode" value="audio">
|
| 325 |
+
<div class="feature-card">
|
| 326 |
+
<div class="card-icon">🎙️</div>
|
| 327 |
+
<div class="card-info">
|
| 328 |
+
<h3>Audio Scan</h3>
|
| 329 |
+
<p>Detects voice cloning and AI audio</p>
|
| 330 |
+
</div>
|
| 331 |
+
</div>
|
| 332 |
+
</label>
|
| 333 |
+
|
| 334 |
+
<label class="feature-card-label">
|
| 335 |
+
<input type="radio" name="mode" value="combined">
|
| 336 |
+
<div class="feature-card">
|
| 337 |
+
<div class="card-icon">⚡</div>
|
| 338 |
+
<div class="card-info">
|
| 339 |
+
<h3>Deep Scan</h3>
|
| 340 |
+
<p>Combined Video + Audio analysis</p>
|
| 341 |
+
</div>
|
| 342 |
+
</div>
|
| 343 |
+
</label>
|
| 344 |
+
|
| 345 |
+
</div>
|
| 346 |
+
|
| 347 |
+
<button type="submit" class="submit-btn">Analyze Media</button>
|
| 348 |
+
</form>
|
| 349 |
+
|
| 350 |
+
</div>
|
| 351 |
+
|
| 352 |
+
<script>
|
| 353 |
+
// Set User Initials for Profile Picture dynamically
|
| 354 |
+
const userName = "{{ user_name }}";
|
| 355 |
+
const initial = userName !== "" && userName !== "{{ user_name }}" ? userName.charAt(0).toUpperCase() : "U";
|
| 356 |
+
document.getElementById("userInitial").innerText = initial;
|
| 357 |
+
document.getElementById("dropdownInitial").innerText = initial;
|
| 358 |
+
|
| 359 |
+
// Display File Name on Upload
|
| 360 |
+
document.getElementById('fileInput').addEventListener('change', function(e) {
|
| 361 |
+
const fileName = e.target.files[0] ? e.target.files[0].name : '';
|
| 362 |
+
if(fileName) {
|
| 363 |
+
document.getElementById('fileName').innerHTML = `✅ Selected: ${fileName}`;
|
| 364 |
+
}
|
| 365 |
+
});
|
| 366 |
+
|
| 367 |
+
// Toggle Profile Dropdown
|
| 368 |
+
function toggleDropdown() {
|
| 369 |
+
const dropdown = document.getElementById("profileDropdown");
|
| 370 |
+
dropdown.classList.toggle("active");
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
// Close dropdown if clicked outside
|
| 374 |
+
window.addEventListener('click', function(e) {
|
| 375 |
+
const profileContainer = document.querySelector('.profile-container');
|
| 376 |
+
const dropdown = document.getElementById("profileDropdown");
|
| 377 |
+
if (!profileContainer.contains(e.target)) {
|
| 378 |
+
dropdown.classList.remove("active");
|
| 379 |
+
}
|
| 380 |
+
});
|
| 381 |
+
</script>
|
| 382 |
+
</body>
|
| 383 |
+
</html>
|
templates/login.html
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>Login - TrueFrame</title>
|
| 7 |
+
<link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap" rel="stylesheet">
|
| 8 |
+
<style>
|
| 9 |
+
/* --- CSS Reset and Variables --- */
|
| 10 |
+
* {
|
| 11 |
+
margin: 0;
|
| 12 |
+
padding: 0;
|
| 13 |
+
box-sizing: border-box;
|
| 14 |
+
font-family: 'Poppins', sans-serif;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
:root {
|
| 18 |
+
--bg-dark: #0a0e17;
|
| 19 |
+
--panel-dark: #131c2a;
|
| 20 |
+
--accent-primary: #00f2fe;
|
| 21 |
+
--accent-secondary: #4facfe;
|
| 22 |
+
--error-color: #ff4b4b;
|
| 23 |
+
--text-light: #e0e0e0;
|
| 24 |
+
--text-muted: #8d97a5;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
body {
|
| 28 |
+
height: 100vh;
|
| 29 |
+
display: flex;
|
| 30 |
+
justify-content: center;
|
| 31 |
+
align-items: center;
|
| 32 |
+
background-color: var(--bg-dark);
|
| 33 |
+
background-image:
|
| 34 |
+
radial-gradient(circle at 20% 20%, rgba(19, 54, 75, 0.945) 0%, transparent 50%),
|
| 35 |
+
radial-gradient(circle at 80% 80%, rgba(13, 42, 70, 0.801) 0%, transparent 50%);
|
| 36 |
+
overflow: hidden;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
/* --- Main Container --- */
|
| 40 |
+
.login-container {
|
| 41 |
+
display: flex;
|
| 42 |
+
width: 900px;
|
| 43 |
+
max-width: 95%;
|
| 44 |
+
height: 600px;
|
| 45 |
+
background-color: var(--panel-dark);
|
| 46 |
+
border-radius: 20px;
|
| 47 |
+
box-shadow: 0 15px 35px rgba(0, 0, 0, 0.5);
|
| 48 |
+
overflow: hidden;
|
| 49 |
+
border: 1px solid rgba(0, 242, 254, 0.1);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
/* --- Left Side: Visuals --- */
|
| 53 |
+
.visual-side {
|
| 54 |
+
flex: 1;
|
| 55 |
+
background: linear-gradient(135deg, rgba(0, 242, 254, 0.1), rgba(79, 172, 254, 0.2));
|
| 56 |
+
display: flex;
|
| 57 |
+
flex-direction: column;
|
| 58 |
+
justify-content: center;
|
| 59 |
+
align-items: center;
|
| 60 |
+
position: relative;
|
| 61 |
+
overflow: hidden;
|
| 62 |
+
padding: 40px;
|
| 63 |
+
color: var(--text-light);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
.visual-content h2 {
|
| 67 |
+
font-size: 2rem;
|
| 68 |
+
font-weight: 700;
|
| 69 |
+
margin-bottom: 10px;
|
| 70 |
+
text-align: center;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
.visual-content h2 span {
|
| 74 |
+
color: var(--accent-primary);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
.visual-content p {
|
| 78 |
+
color: var(--text-muted);
|
| 79 |
+
font-size: 0.9rem;
|
| 80 |
+
text-align: center;
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
/* --- INTEGRATED SCANNER CSS --- */
|
| 84 |
+
.scanner-graphic {
|
| 85 |
+
margin-top: 40px;
|
| 86 |
+
position: relative;
|
| 87 |
+
width: 150px;
|
| 88 |
+
height: 150px;
|
| 89 |
+
background-color: rgba(0, 0, 0, 0.2);
|
| 90 |
+
border-radius: 50%;
|
| 91 |
+
overflow: hidden;
|
| 92 |
+
box-shadow: 0 0 20px rgba(0, 242, 254, 0.2);
|
| 93 |
+
border: 2px solid rgba(0, 242, 254, 0.2);
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
#character-img {
|
| 97 |
+
width: 100%;
|
| 98 |
+
height: 100%;
|
| 99 |
+
object-fit: cover;
|
| 100 |
+
opacity: 0.6; /* Set to 0.4 for holographic effect */
|
| 101 |
+
transition: opacity 0.3s ease;
|
| 102 |
+
mix-blend-mode: screen;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
.scan-beam {
|
| 106 |
+
position: absolute;
|
| 107 |
+
top: 0;
|
| 108 |
+
left: -10%;
|
| 109 |
+
width: 120%;
|
| 110 |
+
height: 3px;
|
| 111 |
+
background: var(--accent-primary);
|
| 112 |
+
box-shadow: 0 0 15px var(--accent-primary), 0 0 30px var(--accent-secondary);
|
| 113 |
+
animation: scanning 2.5s linear infinite;
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
@keyframes scanning {
|
| 117 |
+
0% { top: -5%; opacity: 0; }
|
| 118 |
+
5% { opacity: 1; }
|
| 119 |
+
95% { opacity: 1; }
|
| 120 |
+
100% { top: 105%; opacity: 0; }
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
/* --- Right Side: Form --- */
|
| 124 |
+
.form-side {
|
| 125 |
+
flex: 1;
|
| 126 |
+
padding: 50px;
|
| 127 |
+
display: flex;
|
| 128 |
+
flex-direction: column;
|
| 129 |
+
justify-content: center;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
.form-header {
|
| 133 |
+
margin-bottom: 25px;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
.form-header h1 {
|
| 137 |
+
color: var(--text-light);
|
| 138 |
+
font-size: 1.8rem;
|
| 139 |
+
font-weight: 600;
|
| 140 |
+
display: flex;
|
| 141 |
+
align-items: center;
|
| 142 |
+
gap: 10px;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
/* Flask Flashed Messages Styling */
|
| 146 |
+
.error-message {
|
| 147 |
+
background-color: rgba(255, 75, 75, 0.1);
|
| 148 |
+
border-left: 4px solid var(--error-color);
|
| 149 |
+
color: var(--error-color);
|
| 150 |
+
padding: 10px 15px;
|
| 151 |
+
border-radius: 4px;
|
| 152 |
+
font-size: 0.9rem;
|
| 153 |
+
margin-bottom: 20px;
|
| 154 |
+
font-weight: 500;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
/* Input Groups */
|
| 158 |
+
.input-group {
|
| 159 |
+
margin-bottom: 20px;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
.input-group input {
|
| 163 |
+
width: 100%;
|
| 164 |
+
padding: 12px 15px;
|
| 165 |
+
background-color: rgba(255, 255, 255, 0.05);
|
| 166 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 167 |
+
border-radius: 8px;
|
| 168 |
+
color: var(--text-light);
|
| 169 |
+
font-size: 1rem;
|
| 170 |
+
transition: all 0.3s ease;
|
| 171 |
+
box-sizing: border-box;
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
.input-group input::placeholder {
|
| 175 |
+
color: rgba(255, 255, 255, 0.4);
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
.input-group input:focus {
|
| 179 |
+
outline: none;
|
| 180 |
+
border-color: var(--accent-primary);
|
| 181 |
+
box-shadow: 0 0 10px rgba(0, 242, 254, 0.3);
|
| 182 |
+
background-color: rgba(255, 255, 255, 0.08);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/* Buttons */
|
| 186 |
+
.login-btn {
|
| 187 |
+
width: 100%;
|
| 188 |
+
padding: 14px;
|
| 189 |
+
border: none;
|
| 190 |
+
border-radius: 8px;
|
| 191 |
+
background: linear-gradient(to right, var(--accent-secondary), var(--accent-primary));
|
| 192 |
+
color: var(--bg-dark);
|
| 193 |
+
font-size: 1rem;
|
| 194 |
+
font-weight: 600;
|
| 195 |
+
cursor: pointer;
|
| 196 |
+
transition: transform 0.2s, box-shadow 0.2s;
|
| 197 |
+
margin-bottom: 15px;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
.login-btn:hover {
|
| 201 |
+
transform: translateY(-2px);
|
| 202 |
+
box-shadow: 0 5px 15px rgba(0, 242, 254, 0.4);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
/* Divider */
|
| 206 |
+
.divider {
|
| 207 |
+
margin: 20px 0;
|
| 208 |
+
border-top: 1px solid rgba(255, 255, 255, 0.1);
|
| 209 |
+
position: relative;
|
| 210 |
+
text-align: center;
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
.divider span {
|
| 214 |
+
position: absolute;
|
| 215 |
+
top: -12px;
|
| 216 |
+
left: 50%;
|
| 217 |
+
transform: translateX(-50%);
|
| 218 |
+
background: var(--panel-dark);
|
| 219 |
+
padding: 0 15px;
|
| 220 |
+
color: var(--text-muted);
|
| 221 |
+
font-size: 0.85rem;
|
| 222 |
+
font-weight: 500;
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
/* Google Button */
|
| 226 |
+
.google-btn {
|
| 227 |
+
display: flex;
|
| 228 |
+
align-items: center;
|
| 229 |
+
justify-content: center;
|
| 230 |
+
width: 100%;
|
| 231 |
+
padding: 12px;
|
| 232 |
+
background: rgba(255, 255, 255, 0.05);
|
| 233 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 234 |
+
border-radius: 8px;
|
| 235 |
+
color: var(--text-light);
|
| 236 |
+
font-size: 1rem;
|
| 237 |
+
font-weight: 500;
|
| 238 |
+
text-decoration: none;
|
| 239 |
+
transition: all 0.3s ease;
|
| 240 |
+
cursor: pointer;
|
| 241 |
+
box-sizing: border-box;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
.google-btn:hover {
|
| 245 |
+
background: rgba(255, 255, 255, 0.1);
|
| 246 |
+
border-color: rgba(255, 255, 255, 0.3);
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
.google-btn img {
|
| 250 |
+
width: 20px;
|
| 251 |
+
margin-right: 12px;
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
/* Bottom Links */
|
| 255 |
+
.signup-link {
|
| 256 |
+
text-align: center;
|
| 257 |
+
margin-top: 25px;
|
| 258 |
+
font-size: 0.9rem;
|
| 259 |
+
color: var(--text-muted);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
.signup-link a {
|
| 263 |
+
color: var(--accent-primary);
|
| 264 |
+
text-decoration: none;
|
| 265 |
+
font-weight: 600;
|
| 266 |
+
transition: color 0.2s;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
.signup-link a:hover {
|
| 270 |
+
color: var(--accent-secondary);
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
/* --- Responsive Design --- */
|
| 274 |
+
@media (max-width: 768px) {
|
| 275 |
+
.login-container {
|
| 276 |
+
flex-direction: column;
|
| 277 |
+
height: auto;
|
| 278 |
+
}
|
| 279 |
+
.visual-side {
|
| 280 |
+
padding: 30px 20px;
|
| 281 |
+
min-height: 200px;
|
| 282 |
+
}
|
| 283 |
+
.scanner-graphic {
|
| 284 |
+
display: none;
|
| 285 |
+
}
|
| 286 |
+
.form-side {
|
| 287 |
+
padding: 30px 20px;
|
| 288 |
+
}
|
| 289 |
+
}
|
| 290 |
+
</style>
|
| 291 |
+
</head>
|
| 292 |
+
<body>
|
| 293 |
+
|
| 294 |
+
<div class="login-container">
|
| 295 |
+
<div class="visual-side">
|
| 296 |
+
<div class="visual-content">
|
| 297 |
+
<h2>True<span>Frame</span></h2>
|
| 298 |
+
<p>Authenticating media. Protecting truth.</p>
|
| 299 |
+
</div>
|
| 300 |
+
|
| 301 |
+
<div class="scanner-graphic">
|
| 302 |
+
<img id="character-img" src="" alt="Analyzing Media...">
|
| 303 |
+
<div class="scan-beam"></div>
|
| 304 |
+
</div>
|
| 305 |
+
</div>
|
| 306 |
+
|
| 307 |
+
<div class="form-side">
|
| 308 |
+
<div class="form-header">
|
| 309 |
+
<h1>Login</h1>
|
| 310 |
+
</div>
|
| 311 |
+
|
| 312 |
+
{% with messages = get_flashed_messages() %}
|
| 313 |
+
{% if messages %}
|
| 314 |
+
<div class="error-message">{{ messages[0] }}</div>
|
| 315 |
+
{% endif %}
|
| 316 |
+
{% endwith %}
|
| 317 |
+
|
| 318 |
+
<form method="POST" action="/login">
|
| 319 |
+
|
| 320 |
+
<div class="input-group">
|
| 321 |
+
<input type="email" name="email" placeholder="Email Address" required>
|
| 322 |
+
</div>
|
| 323 |
+
|
| 324 |
+
<div class="input-group">
|
| 325 |
+
<input type="password" name="password" placeholder="Password" required>
|
| 326 |
+
</div>
|
| 327 |
+
|
| 328 |
+
<button type="submit" class="login-btn">Login</button>
|
| 329 |
+
</form>
|
| 330 |
+
|
| 331 |
+
<div class="divider"><span>OR</span></div>
|
| 332 |
+
|
| 333 |
+
<a href="/login/google" class="google-btn">
|
| 334 |
+
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 48 48" width="20px" height="20px" style="margin-right: 12px; display: block;">
|
| 335 |
+
<path fill="#EA4335" d="M24 9.5c3.54 0 6.71 1.22 9.21 3.6l6.85-6.85C35.9 2.38 30.47 0 24 0 14.62 0 6.51 5.38 2.56 13.22l7.98 6.19C12.43 13.72 17.74 9.5 24 9.5z"/>
|
| 336 |
+
<path fill="#4285F4" d="M46.98 24.55c0-1.57-.15-3.09-.38-4.55H24v9.02h12.94c-.58 2.96-2.26 5.48-4.78 7.18l7.73 6c4.51-4.18 7.09-10.36 7.09-17.65z"/>
|
| 337 |
+
<path fill="#FBBC05" d="M10.53 28.59c-.48-1.45-.76-2.99-.76-4.59s.27-3.14.76-4.59l-7.98-6.19C.92 16.46 0 20.12 0 24c0 3.88.92 7.54 2.56 10.78l7.97-6.19z"/>
|
| 338 |
+
<path fill="#34A853" d="M24 48c6.48 0 11.93-2.13 15.89-5.81l-7.73-6c-2.15 1.45-4.92 2.3-8.16 2.3-6.26 0-11.57-4.22-13.47-9.91l-7.98 6.19C6.51 42.62 14.62 48 24 48z"/>
|
| 339 |
+
<path fill="none" d="M0 0h48v48H0z"/>
|
| 340 |
+
</svg>
|
| 341 |
+
Sign in with Google
|
| 342 |
+
</a>
|
| 343 |
+
|
| 344 |
+
<div class="signup-link">
|
| 345 |
+
No account? <a href="/register">Register here</a>
|
| 346 |
+
</div>
|
| 347 |
+
</div>
|
| 348 |
+
</div>
|
| 349 |
+
|
| 350 |
+
<script>
|
| 351 |
+
// 1. Add your relative local paths here
|
| 352 |
+
const maleImages = [
|
| 353 |
+
"{{ url_for('static', filename='avatars/Male1.png') }}",
|
| 354 |
+
"{{ url_for('static', filename='avatars/Male2.png') }}"
|
| 355 |
+
];
|
| 356 |
+
|
| 357 |
+
const femaleImages = [
|
| 358 |
+
"{{ url_for('static', filename='avatars/Female1.png') }}",
|
| 359 |
+
"{{ url_for('static', filename='avatars/Female2.png') }}"
|
| 360 |
+
];
|
| 361 |
+
|
| 362 |
+
let showMaleNext = true;
|
| 363 |
+
const imgElement = document.getElementById('character-img');
|
| 364 |
+
|
| 365 |
+
// 2. Function to pick a random image and alternate genders
|
| 366 |
+
function getRandomImage() {
|
| 367 |
+
let selectedArray = showMaleNext ? maleImages : femaleImages;
|
| 368 |
+
showMaleNext = !showMaleNext;
|
| 369 |
+
|
| 370 |
+
const randomIndex = Math.floor(Math.random() * selectedArray.length);
|
| 371 |
+
return selectedArray[randomIndex];
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
// Initialize the first image
|
| 375 |
+
imgElement.src = getRandomImage();
|
| 376 |
+
|
| 377 |
+
// Loop to match the CSS scanner
|
| 378 |
+
setInterval(() => {
|
| 379 |
+
// Fade out
|
| 380 |
+
imgElement.style.opacity = 0;
|
| 381 |
+
|
| 382 |
+
setTimeout(() => {
|
| 383 |
+
// Swap image
|
| 384 |
+
imgElement.src = getRandomImage();
|
| 385 |
+
|
| 386 |
+
// Fade back in to 0.4
|
| 387 |
+
imgElement.style.opacity = 0.6;
|
| 388 |
+
}, 300);
|
| 389 |
+
|
| 390 |
+
}, 2500);
|
| 391 |
+
</script>
|
| 392 |
+
|
| 393 |
+
</body>
|
| 394 |
+
</html>
|
templates/register.html
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>Register - TrueFrame</title>
|
| 7 |
+
<link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap" rel="stylesheet">
|
| 8 |
+
<style>
|
| 9 |
+
/* --- CSS Reset and Variables --- */
|
| 10 |
+
* {
|
| 11 |
+
margin: 0;
|
| 12 |
+
padding: 0;
|
| 13 |
+
box-sizing: border-box;
|
| 14 |
+
font-family: 'Poppins', sans-serif;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
:root {
|
| 18 |
+
--bg-dark: #0a0e17;
|
| 19 |
+
--panel-dark: #131c2a;
|
| 20 |
+
--accent-primary: #00f2fe;
|
| 21 |
+
--accent-secondary: #4facfe;
|
| 22 |
+
--error-color: #ff4b4b;
|
| 23 |
+
--text-light: #e0e0e0;
|
| 24 |
+
--text-muted: #8d97a5;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
body {
|
| 28 |
+
height: 100vh;
|
| 29 |
+
display: flex;
|
| 30 |
+
justify-content: center;
|
| 31 |
+
align-items: center;
|
| 32 |
+
background-color: var(--bg-dark);
|
| 33 |
+
            /* (duplicate background-color declaration removed) */
|
| 34 |
+
background-image:
|
| 35 |
+
radial-gradient(circle at 20% 20%, rgba(19, 54, 75, 0.945) 0%, transparent 50%),
|
| 36 |
+
radial-gradient(circle at 80% 80%, rgba(13, 42, 70, 0.801) 0%, transparent 50%);
|
| 37 |
+
overflow: hidden;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
/* --- Main Container --- */
|
| 41 |
+
.login-container {
|
| 42 |
+
display: flex;
|
| 43 |
+
width: 900px;
|
| 44 |
+
max-width: 95%;
|
| 45 |
+
height: 600px;
|
| 46 |
+
background-color: var(--panel-dark);
|
| 47 |
+
border-radius: 20px;
|
| 48 |
+
box-shadow: 0 15px 35px rgba(0, 0, 0, 0.5);
|
| 49 |
+
overflow: hidden;
|
| 50 |
+
border: 1px solid rgba(0, 242, 254, 0.1);
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
/* --- Left Side: Visuals --- */
|
| 54 |
+
.visual-side {
|
| 55 |
+
flex: 1;
|
| 56 |
+
background: linear-gradient(135deg, rgba(0, 242, 254, 0.1), rgba(79, 172, 254, 0.2));
|
| 57 |
+
display: flex;
|
| 58 |
+
flex-direction: column;
|
| 59 |
+
justify-content: center;
|
| 60 |
+
align-items: center;
|
| 61 |
+
position: relative;
|
| 62 |
+
overflow: hidden;
|
| 63 |
+
padding: 40px;
|
| 64 |
+
color: var(--text-light);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
.visual-content h2 {
|
| 68 |
+
font-size: 2rem;
|
| 69 |
+
font-weight: 700;
|
| 70 |
+
margin-bottom: 10px;
|
| 71 |
+
text-align: center;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
.visual-content h2 span {
|
| 75 |
+
color: var(--accent-primary);
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
.visual-content p {
|
| 79 |
+
color: var(--text-muted);
|
| 80 |
+
font-size: 0.9rem;
|
| 81 |
+
text-align: center;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
/* --- INTEGRATED SCANNER CSS --- */
|
| 85 |
+
.scanner-graphic {
|
| 86 |
+
margin-top: 40px;
|
| 87 |
+
position: relative;
|
| 88 |
+
width: 150px;
|
| 89 |
+
height: 150px;
|
| 90 |
+
background-color: rgba(0, 0, 0, 0.2);
|
| 91 |
+
border-radius: 50%;
|
| 92 |
+
overflow: hidden;
|
| 93 |
+
box-shadow: 0 0 20px rgba(0, 242, 254, 0.2);
|
| 94 |
+
border: 2px solid rgba(0, 242, 254, 0.2);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
#character-img {
|
| 98 |
+
width: 100%;
|
| 99 |
+
height: 100%;
|
| 100 |
+
object-fit: cover;
|
| 101 |
+
opacity: 0.4;
|
| 102 |
+
transition: opacity 0.3s ease;
|
| 103 |
+
mix-blend-mode: screen;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
.scan-beam {
|
| 107 |
+
position: absolute;
|
| 108 |
+
top: 0;
|
| 109 |
+
left: -10%;
|
| 110 |
+
width: 120%;
|
| 111 |
+
height: 3px;
|
| 112 |
+
background: var(--accent-primary);
|
| 113 |
+
box-shadow: 0 0 15px var(--accent-primary), 0 0 30px var(--accent-secondary);
|
| 114 |
+
animation: scanning 2.5s linear infinite;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
@keyframes scanning {
|
| 118 |
+
0% { top: -5%; opacity: 0; }
|
| 119 |
+
5% { opacity: 1; }
|
| 120 |
+
95% { opacity: 1; }
|
| 121 |
+
100% { top: 105%; opacity: 0; }
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
/* --- Right Side: Form --- */
|
| 125 |
+
.form-side {
|
| 126 |
+
flex: 1;
|
| 127 |
+
padding: 50px;
|
| 128 |
+
display: flex;
|
| 129 |
+
flex-direction: column;
|
| 130 |
+
justify-content: center;
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
.form-header {
|
| 134 |
+
margin-bottom: 25px;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
.form-header h1 {
|
| 138 |
+
color: var(--text-light);
|
| 139 |
+
font-size: 1.8rem;
|
| 140 |
+
font-weight: 600;
|
| 141 |
+
display: flex;
|
| 142 |
+
align-items: center;
|
| 143 |
+
gap: 10px;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
/* Flask Flashed Messages Styling */
|
| 147 |
+
.error-message {
|
| 148 |
+
background-color: rgba(255, 75, 75, 0.1);
|
| 149 |
+
border-left: 4px solid var(--error-color);
|
| 150 |
+
color: var(--error-color);
|
| 151 |
+
padding: 10px 15px;
|
| 152 |
+
border-radius: 4px;
|
| 153 |
+
font-size: 0.9rem;
|
| 154 |
+
margin-bottom: 20px;
|
| 155 |
+
font-weight: 500;
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
/* Input Groups */
|
| 159 |
+
.input-group {
|
| 160 |
+
margin-bottom: 20px;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
.input-group input {
|
| 164 |
+
width: 100%;
|
| 165 |
+
padding: 12px 15px;
|
| 166 |
+
background-color: rgba(255, 255, 255, 0.05);
|
| 167 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 168 |
+
border-radius: 8px;
|
| 169 |
+
color: var(--text-light);
|
| 170 |
+
font-size: 1rem;
|
| 171 |
+
transition: all 0.3s ease;
|
| 172 |
+
box-sizing: border-box;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
.input-group input::placeholder {
|
| 176 |
+
color: rgba(255, 255, 255, 0.4);
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
.input-group input:focus {
|
| 180 |
+
outline: none;
|
| 181 |
+
border-color: var(--accent-primary);
|
| 182 |
+
box-shadow: 0 0 10px rgba(0, 242, 254, 0.3);
|
| 183 |
+
background-color: rgba(255, 255, 255, 0.08);
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
/* Buttons */
|
| 187 |
+
.login-btn {
|
| 188 |
+
width: 100%;
|
| 189 |
+
padding: 14px;
|
| 190 |
+
border: none;
|
| 191 |
+
border-radius: 8px;
|
| 192 |
+
background: linear-gradient(to right, #00f2fe, #4facfe);
|
| 193 |
+
color: var(--bg-dark);
|
| 194 |
+
font-size: 1rem;
|
| 195 |
+
font-weight: 600;
|
| 196 |
+
cursor: pointer;
|
| 197 |
+
transition: transform 0.2s, box-shadow 0.2s;
|
| 198 |
+
margin-top: 10px;
|
| 199 |
+
margin-bottom: 15px;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
.login-btn:hover {
|
| 203 |
+
transform: translateY(-2px);
|
| 204 |
+
box-shadow: 0 5px 15px rgba(0, 242, 254, 0.4);
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
/* Bottom Links */
|
| 208 |
+
.signup-link {
|
| 209 |
+
text-align: center;
|
| 210 |
+
margin-top: 25px;
|
| 211 |
+
font-size: 0.9rem;
|
| 212 |
+
color: var(--text-muted);
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
.signup-link a {
|
| 216 |
+
color: var(--accent-primary);
|
| 217 |
+
text-decoration: none;
|
| 218 |
+
font-weight: 600;
|
| 219 |
+
transition: color 0.2s;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
.signup-link a:hover {
|
| 223 |
+
color: var(--accent-secondary);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
/* --- Responsive Design --- */
|
| 227 |
+
@media (max-width: 768px) {
|
| 228 |
+
.login-container {
|
| 229 |
+
flex-direction: column;
|
| 230 |
+
height: auto;
|
| 231 |
+
}
|
| 232 |
+
.visual-side {
|
| 233 |
+
padding: 30px 20px;
|
| 234 |
+
min-height: 200px;
|
| 235 |
+
}
|
| 236 |
+
.scanner-graphic {
|
| 237 |
+
display: none;
|
| 238 |
+
}
|
| 239 |
+
.form-side {
|
| 240 |
+
padding: 30px 20px;
|
| 241 |
+
}
|
| 242 |
+
}
|
| 243 |
+
</style>
|
| 244 |
+
</head>
|
| 245 |
+
<body>
|
| 246 |
+
|
| 247 |
+
<div class="login-container">
|
| 248 |
+
<div class="visual-side">
|
| 249 |
+
<div class="visual-content">
|
| 250 |
+
<h2>True<span>Frame</span></h2>
|
| 251 |
+
<p>Authenticating media. Protecting truth.</p>
|
| 252 |
+
</div>
|
| 253 |
+
|
| 254 |
+
<div class="scanner-graphic">
|
| 255 |
+
<img id="character-img" src="" alt="Analyzing Media...">
|
| 256 |
+
<div class="scan-beam"></div>
|
| 257 |
+
</div>
|
| 258 |
+
</div>
|
| 259 |
+
|
| 260 |
+
<div class="form-side">
|
| 261 |
+
<div class="form-header">
|
| 262 |
+
<h1>Create Account</h1>
|
| 263 |
+
</div>
|
| 264 |
+
|
| 265 |
+
{% with messages = get_flashed_messages() %}
|
| 266 |
+
{% if messages %}
|
| 267 |
+
<div class="error-message">{{ messages[0] }}</div>
|
| 268 |
+
{% endif %}
|
| 269 |
+
{% endwith %}
|
| 270 |
+
|
| 271 |
+
<form method="POST" action="/register">
|
| 272 |
+
|
| 273 |
+
<div class="input-group">
|
| 274 |
+
<input type="text" name="name" placeholder="Full Name" required>
|
| 275 |
+
</div>
|
| 276 |
+
|
| 277 |
+
<div class="input-group">
|
| 278 |
+
<input type="email" name="email" placeholder="Email Address" required>
|
| 279 |
+
</div>
|
| 280 |
+
|
| 281 |
+
<div class="input-group">
|
| 282 |
+
<input type="password" name="password" placeholder="Create Password" required>
|
| 283 |
+
</div>
|
| 284 |
+
|
| 285 |
+
<button type="submit" class="login-btn">Sign Up</button>
|
| 286 |
+
</form>
|
| 287 |
+
|
| 288 |
+
<div class="signup-link">
|
| 289 |
+
Already have an account? <a href="/login">Login here</a>
|
| 290 |
+
</div>
|
| 291 |
+
</div>
|
| 292 |
+
</div>
|
| 293 |
+
|
| 294 |
+
<script>
|
| 295 |
+
// 1. Array of Flask static image paths
|
| 296 |
+
const maleImages = [
|
| 297 |
+
"{{ url_for('static', filename='avatars/Male1.png') }}",
|
| 298 |
+
"{{ url_for('static', filename='avatars/Male2.png') }}"
|
| 299 |
+
];
|
| 300 |
+
|
| 301 |
+
const femaleImages = [
|
| 302 |
+
"{{ url_for('static', filename='avatars/Female1.png') }}",
|
| 303 |
+
"{{ url_for('static', filename='avatars/Female2.png') }}"
|
| 304 |
+
];
|
| 305 |
+
|
| 306 |
+
let showMaleNext = true;
|
| 307 |
+
const imgElement = document.getElementById('character-img');
|
| 308 |
+
|
| 309 |
+
// 2. Function to pick a random image and alternate genders
|
| 310 |
+
function getRandomImage() {
|
| 311 |
+
let selectedArray = showMaleNext ? maleImages : femaleImages;
|
| 312 |
+
showMaleNext = !showMaleNext;
|
| 313 |
+
|
| 314 |
+
const randomIndex = Math.floor(Math.random() * selectedArray.length);
|
| 315 |
+
return selectedArray[randomIndex];
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
// Initialize the first image
|
| 319 |
+
imgElement.src = getRandomImage();
|
| 320 |
+
|
| 321 |
+
// Loop to match the CSS scanner
|
| 322 |
+
setInterval(() => {
|
| 323 |
+
// Fade out
|
| 324 |
+
imgElement.style.opacity = 0;
|
| 325 |
+
|
| 326 |
+
setTimeout(() => {
|
| 327 |
+
// Swap image
|
| 328 |
+
imgElement.src = getRandomImage();
|
| 329 |
+
|
| 330 |
+
// Fade back in to 0.4
|
| 331 |
+
imgElement.style.opacity = 0.4;
|
| 332 |
+
}, 300);
|
| 333 |
+
|
| 334 |
+
}, 2500);
|
| 335 |
+
</script>
|
| 336 |
+
|
| 337 |
+
</body>
|
| 338 |
+
</html>
|
templates/result.html
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>TrueFrame - Analysis Result</title>
|
| 7 |
+
<style>
|
| 8 |
+
:root {
|
| 9 |
+
--bg-dark: #0b111e;
|
| 10 |
+
--card-bg: #141c2b;
|
| 11 |
+
--primary-cyan: #00e5ff;
|
| 12 |
+
--primary-blue: #0077ff;
|
| 13 |
+
--text-main: #ffffff;
|
| 14 |
+
--text-muted: #8b9bb4;
|
| 15 |
+
--border-color: rgba(255, 255, 255, 0.1);
|
| 16 |
+
--danger: #ff3b30;
|
| 17 |
+
--success: #28a745;
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
body {
|
| 21 |
+
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
|
| 22 |
+
background-color: var(--bg-dark);
|
| 23 |
+
color: var(--text-main);
|
| 24 |
+
margin: 0;
|
| 25 |
+
min-height: 100vh;
|
| 26 |
+
display: flex;
|
| 27 |
+
flex-direction: column;
|
| 28 |
+
align-items: center;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
/* Navbar & Profile (Matched to Dashboard) */
|
| 32 |
+
.navbar {
|
| 33 |
+
width: 100%;
|
| 34 |
+
padding: 15px 40px;
|
| 35 |
+
display: flex;
|
| 36 |
+
justify-content: space-between;
|
| 37 |
+
align-items: center;
|
| 38 |
+
box-sizing: border-box;
|
| 39 |
+
background: rgba(20, 28, 43, 0.8);
|
| 40 |
+
backdrop-filter: blur(10px);
|
| 41 |
+
border-bottom: 1px solid var(--border-color);
|
| 42 |
+
}
|
| 43 |
+
.brand { font-size: 1.5em; font-weight: bold; letter-spacing: 1px; }
|
| 44 |
+
.brand span { color: var(--primary-cyan); }
|
| 45 |
+
|
| 46 |
+
/* Main Container */
|
| 47 |
+
.container {
|
| 48 |
+
margin: 60px 20px;
|
| 49 |
+
background: var(--card-bg);
|
| 50 |
+
border-radius: 24px;
|
| 51 |
+
padding: 50px;
|
| 52 |
+
width: 100%;
|
| 53 |
+
max-width: 600px;
|
| 54 |
+
text-align: center;
|
| 55 |
+
border: 1px solid var(--border-color);
|
| 56 |
+
box-shadow: 0 20px 40px rgba(0,0,0,0.4);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
h1 { font-size: 2.5em; margin: 0 0 10px 0; }
|
| 60 |
+
|
| 61 |
+
/* Verdict Styling */
|
| 62 |
+
.verdict-box {
|
| 63 |
+
padding: 20px;
|
| 64 |
+
border-radius: 16px;
|
| 65 |
+
font-size: 28px;
|
| 66 |
+
font-weight: 800;
|
| 67 |
+
text-transform: uppercase;
|
| 68 |
+
margin-bottom: 40px;
|
| 69 |
+
letter-spacing: 2px;
|
| 70 |
+
border: 2px solid transparent;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
.fake {
|
| 74 |
+
color: var(--danger);
|
| 75 |
+
background: rgba(255, 59, 48, 0.1);
|
| 76 |
+
border-color: var(--danger);
|
| 77 |
+
box-shadow: 0 0 20px rgba(255, 59, 48, 0.2);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
.real {
|
| 81 |
+
color: var(--primary-cyan);
|
| 82 |
+
background: rgba(0, 229, 255, 0.1);
|
| 83 |
+
border-color: var(--primary-cyan);
|
| 84 |
+
box-shadow: 0 0 20px rgba(0, 229, 255, 0.2);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/* Scan Details Info */
|
| 88 |
+
.details-panel {
|
| 89 |
+
text-align: left;
|
| 90 |
+
background: rgba(255,255,255,0.03);
|
| 91 |
+
padding: 30px;
|
| 92 |
+
border-radius: 16px;
|
| 93 |
+
margin-bottom: 30px;
|
| 94 |
+
border-left: 4px solid var(--primary-cyan);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
.details-panel h3 { margin-top: 0; color: var(--primary-cyan); font-size: 18px; text-transform: uppercase; }
|
| 98 |
+
.data-row { display: flex; justify-content: space-between; margin-bottom: 10px; font-size: 1.1em; }
|
| 99 |
+
.data-label { color: var(--text-muted); }
|
| 100 |
+
|
| 101 |
+
/* Buttons */
|
| 102 |
+
.btn-group { display: flex; gap: 15px; }
|
| 103 |
+
.btn {
|
| 104 |
+
flex: 1;
|
| 105 |
+
padding: 15px;
|
| 106 |
+
border-radius: 30px;
|
| 107 |
+
text-decoration: none;
|
| 108 |
+
font-weight: bold;
|
| 109 |
+
transition: 0.3s;
|
| 110 |
+
text-align: center;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
.btn-primary {
|
| 114 |
+
background: linear-gradient(90deg, var(--primary-cyan), var(--primary-blue));
|
| 115 |
+
color: white;
|
| 116 |
+
box-shadow: 0 8px 20px rgba(0, 119, 255, 0.3);
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
.btn-secondary {
|
| 120 |
+
background: rgba(255,255,255,0.05);
|
| 121 |
+
color: white;
|
| 122 |
+
border: 1px solid var(--border-color);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
.btn:hover { transform: translateY(-2px); filter: brightness(1.1); }
|
| 126 |
+
|
| 127 |
+
</style>
|
| 128 |
+
</head>
|
| 129 |
+
<body>
|
| 130 |
+
|
| 131 |
+
<nav class="navbar">
|
| 132 |
+
<div class="brand">True<span>Frame</span></div>
|
| 133 |
+
<div style="color: var(--text-muted); font-size: 0.9em;">Analysis Report</div>
|
| 134 |
+
</nav>
|
| 135 |
+
|
| 136 |
+
<div class="container">
|
| 137 |
+
<h1>Analysis Complete</h1>
|
| 138 |
+
|
| 139 |
+
<div class="verdict-box {{ css_class }}">
|
| 140 |
+
{{ result }}
|
| 141 |
+
</div>
|
| 142 |
+
|
| 143 |
+
<div class="details-panel">
|
| 144 |
+
<h3>Scan Metadata</h3>
|
| 145 |
+
<div class="data-row">
|
| 146 |
+
<span class="data-label">Media Type:</span>
|
| 147 |
+
<span>{{ type }}</span>
|
| 148 |
+
</div>
|
| 149 |
+
<div class="data-row">
|
| 150 |
+
<span class="data-label">AI Confidence:</span>
|
| 151 |
+
<span style="color: var(--primary-cyan); font-weight: bold;">{{ confidence }}%</span>
|
| 152 |
+
</div>
|
| 153 |
+
|
| 154 |
+
<hr style="border: 0; border-top: 1px solid var(--border-color); margin: 20px 0;">
|
| 155 |
+
|
| 156 |
+
<div style="color: var(--text-muted); font-size: 0.95em; line-height: 1.6;">
|
| 157 |
+
{{ extra_info | safe }}
|
| 158 |
+
</div>
|
| 159 |
+
</div>
|
| 160 |
+
|
| 161 |
+
<div class="btn-group">
|
| 162 |
+
<a href="/" class="btn btn-primary">New Analysis</a>
|
| 163 |
+
<a href="javascript:window.print()" class="btn btn-secondary">Download Report</a>
|
| 164 |
+
</div>
|
| 165 |
+
</div>
|
| 166 |
+
|
| 167 |
+
</body>
|
| 168 |
+
</html>
|
video_detect.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 3 |
+
from PIL import Image
|
| 4 |
+
|
| 5 |
+
class VideoDeepfakeDetector:
    """Zero-shot deepfake detector for individual video frames.

    Scores a frame with a locally cached CLIP model against text prompts
    describing authentic camera footage vs. AI-generated / manipulated
    imagery, then aggregates the per-prompt probabilities into a
    real-vs-fake verdict.
    """

    # Prompt order matters: predict() sums the first len(_REAL_LABELS)
    # softmax scores as the "real" mass and the remainder as "fake".
    # These prompts are tuned for vlogs, motion blur, and low-quality frames.
    _REAL_LABELS = [
        "raw authentic photo from a camera",
        "youtube vlog frame with text and emojis",
        "low quality phone camera footage",
    ]
    _FAKE_LABELS = [
        "ai generated image from text prompt",
        "hyper-realistic cgi 3d render",
        "digital art style synthetic face",
        "deepfake face swap artifacts",
        "unnatural smooth skin texture",
    ]

    # Stricter cutoff than a still-image detector because video frames
    # are noisier (motion blur, compression artifacts).
    _FAKE_THRESHOLD = 0.51

    def __init__(self, local_model_path="./local_clip_model"):
        """Load the CLIP model and processor from *local_model_path*.

        Args:
            local_model_path: directory containing a locally cached CLIP
                checkpoint (produced by setup_model.py).

        Raises:
            SystemExit: if the model cannot be loaded (fail-fast, matching
                the original exit-on-error behaviour).
        """
        print("⚡ Loading Video AI Model...")
        try:
            self.model = CLIPModel.from_pretrained(local_model_path)
            self.processor = CLIPProcessor.from_pretrained(local_model_path)
            print("✅ Video Model Ready.")
        except Exception as e:
            print(f"❌ Error: {e}")
            print("Run 'setup_model.py' first.")
            # Explicit SystemExit instead of the site-module exit() helper,
            # which is not guaranteed to exist outside interactive sessions.
            raise SystemExit(1)

    def predict(self, image_path):
        """Classify a single extracted video frame.

        Args:
            image_path: filesystem path to an image readable by PIL.

        Returns:
            A ``(verdict, score)`` tuple: ``("DEEPFAKE DETECTED", fake_score)``
            or ``("REAL", real_score)``; ``("ERROR", 0.0)`` when the frame
            cannot be opened or processed.
        """
        try:
            # Context manager closes the underlying file handle, and
            # convert("RGB") guards against RGBA / palette / greyscale
            # frames that would otherwise break CLIP preprocessing.
            with Image.open(image_path) as img:
                image = img.convert("RGB")

            labels = self._REAL_LABELS + self._FAKE_LABELS

            inputs = self.processor(
                text=labels,
                images=image,
                return_tensors="pt",
                padding=True,
            )

            # Inference only — no gradients needed.
            with torch.no_grad():
                outputs = self.model(**inputs)

            probs = outputs.logits_per_image.softmax(dim=1)
            scores = probs.tolist()[0]

            # Aggregate probability mass by category, sliced by the label
            # lists rather than magic indices so the lists stay the single
            # source of truth.
            n_real = len(self._REAL_LABELS)
            real_score = sum(scores[:n_real])
            fake_score = sum(scores[n_real:])

            if fake_score > self._FAKE_THRESHOLD:
                return "DEEPFAKE DETECTED", fake_score
            return "REAL", real_score

        except Exception as e:
            # Best-effort: a single unreadable frame must not abort the
            # whole video scan, so report and continue.
            print(f"Error predicting {image_path}: {e}")
            return "ERROR", 0.0
|