Upload 23 files

- .dockerignore +27 -0
- .gitattributes +1 -0
- .gitignore +13 -0
- Dockerfile +29 -0
- Procfile +1 -0
- README.md +74 -11
- app.py +223 -0
- check_labels.py +4 -0
- debug_load.py +9 -0
- debug_predict.py +18 -0
- debug_single_image.py +29 -0
- frequency_analysis.py +97 -0
- identity_risk.py +128 -0
- inspect_dire.py +22 -0
- render.yaml +9 -0
- requirements.txt +13 -0
- static/bg-animation.js +113 -0
- static/script.js +405 -0
- static/style.css +2 -0
- templates/index.html +358 -0
- test_app_load.py +8 -0
- test_freq.py +17 -0
- test_image.jpg +3 -0
- train.py +151 -0
.dockerignore
ADDED
@@ -0,0 +1,27 @@
+# Git
+.git
+.gitignore
+
+# Python/Environment
+venv/
+env/
+.venv/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+
+# Large Data & Models (Download at runtime or mount as volumes)
+dataset/
+model/
+weights/
+
+# IDE
+.vscode/
+.idea/
+
+# Local Dev/Debug
+debug_*.py
+test_*.py
+tests/
+*.log
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+test_image.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,13 @@
+__pycache__/
+*.py[cod]
+*$py.class
+.env
+.venv
+env/
+venv/
+.DS_Store
+
+# Large Files
+dataset/
+model/
+
Dockerfile
ADDED
@@ -0,0 +1,29 @@
+# Use an official Python runtime as a parent image
+FROM python:3.9-slim
+
+# Set the working directory in the container
+WORKDIR /code
+
+# Install system dependencies (required for OpenCV)
+RUN apt-get update && apt-get install -y \
+    libgl1-mesa-glx \
+    libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first to leverage Docker cache
+COPY requirements.txt /code/requirements.txt
+
+# Install PyTorch CPU-only (the CPU wheel index significantly reduces image size/memory usage)
+RUN pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu
+
+# Install other Python dependencies
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+# Copy the rest of the application
+COPY . /code
+
+# Expose the port
+EXPOSE 7860
+
+# Command to run the application
+CMD ["gunicorn", "-b", "0.0.0.0:7860", "app:app"]
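For local testing, the container can be built and run roughly as follows. This is a minimal sketch: the image tag `coin-toss` and the host-port mapping are illustrative assumptions, not part of the repository; only the port 7860 and the gunicorn bind address come from the Dockerfile above.

```bash
# Build the image from the repository root (tag name is arbitrary)
docker build -t coin-toss .

# Run it, mapping the exposed gunicorn port 7860 to the host
docker run --rm -p 7860:7860 coin-toss
# The UI should then be reachable at http://localhost:7860
```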
Procfile
ADDED
@@ -0,0 +1 @@
+web: gunicorn app:app
README.md
CHANGED
@@ -1,11 +1,74 @@
----
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk: docker
-
-
-
-
-
+---
+title: Coin Toss Detector
+emoji: 🪙
+colorFrom: blue
+colorTo: yellow
+sdk: docker
+app_port: 7860
+---
+
+# COIN-TOSS: AI & Identity Risk Detection
+
+## Overview
+
+COIN-TOSS is an advanced web application designed to accurately detect AI-generated images and assess potential identity theft risks. By combining multiple state-of-the-art deep learning models with custom analysis logic ("Gap Trap V3"), it provides a reliable "Real" vs "AI" verdict without ambiguous percentages, while also identifying potential misuse of authentic images.
+
+## Features
+
+- **High-Accuracy AI Detection**:
+  - Utilizes a hybrid ensemble of models (`dima806/ai_vs_real_image_detection` and `prithivMLmods/Deep-Fake-Detector-v2-Model`).
+- **Gap Trap V3 Logic**: A specialized algorithm to catch "uncanny valley" images and properly classify filtered real photos vs. high-quality deepfakes.
+- **Frequency Analysis**: Visualizes invisible noise patterns (FFT) to detect checkerboard artifacts common in diffusion models.
+- **Identity Theft Risk Analysis**:
+  - Analyzes "Real" images for biometric metrics (Face Visibility, Quality, etc.).
+  - Provides a risk assessment (Low/High) for using the image in sensitive contexts (KYC, Profiles).
+- **User-Friendly Interface**:
+  - Simple drag-and-drop upload.
+  - Instant "Real" or "AI" verdict.
+  - Detailed analysis points explaining the decision.
+
+## Workflow
+
+### Prerequisites
+
+- Python 3.8+
+- Git
+
+### Installation
+
+1. **Clone the Repository**
+   ```bash
+   git clone https://github.com/madhavmullick2025/COIN-TOSS.git
+   cd COIN-TOSS
+   ```
+
+2. **Install Dependencies**
+   It is recommended to use a virtual environment.
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+### Usage
+
+1. **Start the Application**
+   ```bash
+   python app.py
+   ```
+   *Note: The first run may take a few moments to download the necessary model weights from HuggingFace.*
+
+2. **Access the Interface**
+   Open your web browser and navigate to:
+   ```
+   http://localhost:5002
+   ```
+
+3. **Analyze Images**
+   - Upload an image (JPG, PNG, WEBP).
+   - Click "Analyze" to see if it's Real or AI.
+   - If "Real", switch to the "Identity Risk" tab to see safety metrics.
+
+## Tech Stack
+
+- **Backend**: Python, Flask, PyTorch, Transformers (HuggingFace).
+- **Frontend**: HTML5, CSS3, JavaScript.
+- **AI Models**: ViT (Vision Transformer) based image classifiers.
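The web UI is the primary interface, but the Flask `/predict` route defined in `app.py` below can also be exercised directly. A minimal sketch, assuming the server was started with `python app.py` on the default port 5002: the `image` form-field name and the response keys come from `app.py`, while the use of the bundled `test_image.jpg` is just an example input.

```bash
# POST a multipart upload to the running server
curl -s -X POST -F "image=@test_image.jpg" http://localhost:5002/predict
# The JSON response includes "prediction", "confidence", "classification_tag",
# "analysis_points", "risk_analysis", and base64-encoded frequency/pattern maps.
```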
app.py
ADDED
@@ -0,0 +1,223 @@
+import os
+import torch
+from flask import Flask, request, jsonify, render_template
+from transformers import AutoImageProcessor, AutoModelForImageClassification
+from PIL import Image
+import io
+import torch.nn.functional as F
+from identity_risk import IdentityRiskAnalyzer  # [NEW]
+try:
+    from frequency_analysis import FrequencyAnalyzer  # [NEW]
+except ImportError:
+    FrequencyAnalyzer = None
+    print("WARNING: FrequencyAnalyzer could not be imported. Feature disabled.")
+
+app = Flask(__name__)
+
+# --- Configuration ---
+# Loading multiple models for an ensemble approach
+# User requested "Academy/Winston AI" accuracy.
+# Switching to NYUAD-ComNets/NYUAD_AI_Generated_Image_Detection
+# This model is likely trained on a massive academic dataset for rigor.
+# User requested "Winston AI" style accuracy.
+# We combine a "Liberal" Model (umm-maybe - good with filters)
+# and a "Conservative" Model (dima806 - strict on artifacts).
+# User requested "Accuracy" for both AI and Real.
+# Implementing "Gap Trap V2" Logic.
+# This logic specifically targets the "Uncanny Valley" of AI Hyper-Realism.
+MODEL_GENERAL = "dima806/ai_vs_real_image_detection"
+MODEL_FACE = "prithivMLmods/Deep-Fake-Detector-v2-Model"
+
+models = {}
+processors = {}
+risk_analyzer = None  # [NEW]
+freq_analyzer = None  # [NEW]
+
+# --- Load Models & Processors ---
+def load_models():
+    global risk_analyzer
+    try:
+        print(f"Loading General Model: {MODEL_GENERAL}...")
+        models['general'] = AutoModelForImageClassification.from_pretrained(MODEL_GENERAL)
+        processors['general'] = AutoImageProcessor.from_pretrained(MODEL_GENERAL)
+
+        print(f"Loading Face Model: {MODEL_FACE}...")
+        models['face'] = AutoModelForImageClassification.from_pretrained(MODEL_FACE)
+        processors['face'] = AutoImageProcessor.from_pretrained(MODEL_FACE)
+
+        print("Loading Identity Risk Analyzer...")
+        risk_analyzer = IdentityRiskAnalyzer()  # [NEW]
+
+        if FrequencyAnalyzer:
+            print("Loading Frequency Analyzer...")
+            global freq_analyzer
+            freq_analyzer = FrequencyAnalyzer()  # [NEW]
+        else:
+            print("Skipping Frequency Analyzer...")
+
+        print("All models loaded successfully.")
+    except Exception as e:
+        print(f"Error loading models: {e}")
+
+load_models()
+
+# --- Routes ---
+
+@app.route('/')
+def home():
+    return render_template('index.html')
+
+@app.route('/predict', methods=['POST'])
+def predict():
+    if not models or not processors:
+        return jsonify({"error": "Models not loaded; service unavailable"}), 503
+
+    if 'image' not in request.files:
+        return jsonify({"error": "No image uploaded"}), 400
+
+    file = request.files['image']
+    if file.filename == '':
+        return jsonify({"error": "No file selected"}), 400
+
+    try:
+        # Read image
+        image_bytes = file.read()
+        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+
+        # --- Inference Helper ---
+        def get_prob(model_key, img):
+            processor = processors[model_key]
+            model = models[model_key]
+            inputs = processor(images=img, return_tensors="pt")
+            with torch.no_grad():
+                outputs = model(**inputs)
+            return F.softmax(outputs.logits, dim=-1)
+
+        # 1. Get General Scores (dima806)
+        probs_gen = get_prob('general', image)
+        labels_gen = models['general'].config.id2label
+        idx_real_gen = 0  # Default assumption
+        if 'real' in str(labels_gen.get(0, '')).lower(): idx_real_gen = 0
+        elif 'real' in str(labels_gen.get(1, '')).lower(): idx_real_gen = 1
+
+        real_score_gen = probs_gen[0][idx_real_gen].item()
+        fake_score_gen = probs_gen[0][1 - idx_real_gen].item()
+
+        # 2. Get Face Scores (Deepfake)
+        probs_face = get_prob('face', image)
+        # Deepfake labels: 0=Realism, 1=Deepfake
+        real_score_face = probs_face[0][0].item()
+        fake_score_face = probs_face[0][1].item()
+
+        # --- GAP TRAP V3 (v19.0) ---
+        # Refined Thresholds to trap "Noise" on non-face images.
+        # Digital Art often scores ~0.46 on Face Model.
+        # We raise the "High Quality" bar to 0.65.
+
+        print(f"DEBUG: General_Fake={fake_score_gen:.4f}, Face_Real={real_score_face:.4f}")
+
+        # --- Logic & Explanation Tracking ---
+        analysis_points = []
+
+        # Step 1: Default to General Model
+        if fake_score_gen > 0.5:
+            final_label = "AI"
+            final_prob = fake_score_gen
+            analysis_points.append("General analysis detected synthetic patterns/artifacts.")
+        else:
+            final_label = "Real"
+            final_prob = real_score_gen
+            analysis_points.append("No significant deepfake artifacts detected.")
+            analysis_points.append("Image noise patterns consistent with optical cameras.")
+
+        # Step 2: The Widened Gap Trap
+        if final_label == "AI":
+            # Zone A: Filtered Real (0.00 - 0.25) -> OVERRIDE REAL
+            # Zone B: Uncanny Valley / Noise (0.25 - 0.65) -> TRAP (STAY AI)
+            # Zone C: High Quality Real (0.65 - 1.00) -> OVERRIDE REAL
+
+            if real_score_face < 0.25:
+                print("DEBUG: Override -> Real (Filter Zone)")
+                final_label = "Real"
+                final_prob = 0.85
+                analysis_points = []  # Reset for override
+                analysis_points.append("Heavy smoothing detected, consistent with beauty filters.")
+                analysis_points.append("Underlying facial structure remains authentic.")
+            elif real_score_face > 0.65:
+                print("DEBUG: Override -> Real (High Quality Zone)")
+                final_label = "Real"
+                final_prob = real_score_face
+                analysis_points = []  # Reset for override
+                analysis_points.append("High-fidelity skin micro-textures confirm human subject.")
+                analysis_points.append("Lighting interaction with features appears natural.")
+            else:
+                print("DEBUG: Trap Triggered -> Confirmed AI (Uncanny Valley / Noise)")
+                analysis_points.append("Deep analysis confirms lack of authentic biological details.")
+                analysis_points.append("Texture inconsistencies found in detailed regions.")
+
+        # --- [NEW] Smart Tagging (UI Badge) ---
+        classification_tag = ""
+        if final_label == "AI":
+            if final_prob > 0.98:
+                classification_tag = "Completely generated by AI"
+            else:
+                classification_tag = "High-level Digital Manipulation"
+        else:  # Real
+            if final_prob > 0.99:
+                classification_tag = "Raw Image / Authentic Source"
+            elif final_prob > 0.90:
+                classification_tag = "Likely Authentic (Filters)"
+            else:
+                classification_tag = "Heavily Processed / Filtered"
+
+        print(f"DEBUG: Generated Tag: {classification_tag}")
+
+        # --- [NEW] Identity Risk Check ---
+        risk_data = {}
+        if final_label == "Real" and risk_analyzer:
+            try:
+                print("Running Identity Risk Analysis...")
+                risk_data = risk_analyzer.analyze(image)
+            except Exception as risk_e:
+                print(f"Risk Analysis Error: {risk_e}")
+                risk_data = {"error": "Analysis failed"}
+
+        # --- [NEW] Frequency Analysis ---
+        frequency_map_b64 = ""
+        pattern_map_b64 = ""  # [NEW]
+        if freq_analyzer:
+            try:
+                # We analyze the raw image for frequency artifacts
+                frequency_map_b64 = freq_analyzer.generate_spectrum(image)
+                pattern_map_b64 = freq_analyzer.generate_pattern_map(image)  # [NEW]
+            except Exception as freq_e:
+                print(f"Frequency Analysis Error: {freq_e}")
+
+        return jsonify({
+            "prediction": final_label,
+            "confidence": float(f"{final_prob:.4f}"),
+            "classification_tag": classification_tag,  # [NEW]
+            "analysis_points": analysis_points,  # [NEW]
+            "risk_analysis": risk_data,
+            "frequency_analysis": frequency_map_b64,  # [NEW]
+            "pattern_analysis": pattern_map_b64,  # [NEW]
+            "all_scores": {
+                "Real": float(f"{1 - final_prob if final_label == 'AI' else final_prob:.4f}"),
+                "AI": float(f"{final_prob if final_label == 'AI' else 1 - final_prob:.4f}"),
+                "Debug_General_Fake": fake_score_gen,
+                "Debug_Face_Real": real_score_face,
+                "Debug_Mode": "Gap Trap V3 [0.25-0.65]"
+            }
+        })
+
+    except Exception as e:
+        print(f"Prediction error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+if __name__ == '__main__':
+    print("--- STARTING SERVER VERSION 19.2 (GAP TRAP V3 + ID RISK) ---")
+    try:
+        port = int(os.environ.get("PORT", 5002))
+        app.run(debug=False, host='0.0.0.0', port=port)
+    except Exception as e:
+        print(f"Startup Error: {e}")
check_labels.py
ADDED
@@ -0,0 +1,4 @@
+
+from transformers import AutoModelForImageClassification
+model = AutoModelForImageClassification.from_pretrained("prithivMLmods/Deep-Fake-Detector-v2-Model")
+print("LABELS:", model.config.id2label)
debug_load.py
ADDED
@@ -0,0 +1,9 @@
+
+from transformers import AutoModelForImageClassification, AutoImageProcessor
+try:
+    print("Loading EfficientNet...")
+    m = AutoModelForImageClassification.from_pretrained("Dafilab/ai-image-detector")
+    p = AutoImageProcessor.from_pretrained("Dafilab/ai-image-detector")
+    print("Success")
+except Exception as e:
+    print(f"FAILED: {e}")
debug_predict.py
ADDED
@@ -0,0 +1,18 @@
+import requests
+
+url = 'http://127.0.0.1:5002/predict'
+file_path = r'C:/Users/Madhav/.gemini/antigravity/brain/67c74e6e-1b77-4d5b-894d-787254bc31a0/uploaded_image_1767961715511.jpg'
+
+try:
+    with open(file_path, 'rb') as f:
+        files = {'image': f}
+        response = requests.post(url, files=files)
+    data = response.json()
+    print("FULL RESPONSE:", data)
+    scores = data.get('all_scores', {})
+    print(f"DEBUG - Face Real: {scores.get('Debug_Face_Real')}")
+    print(f"DEBUG - Gen Fake: {scores.get('Debug_General_Fake')}")
+    print(f"DEBUG - Analysis: {data.get('analysis_points')}")
+    print(f"Prediction: {data.get('prediction')}")
+except Exception as e:
+    print(f"Error: {e}")
debug_single_image.py
ADDED
@@ -0,0 +1,29 @@
+from identity_risk import IdentityRiskAnalyzer
+from PIL import Image
+import os
+
+# Path to the uploaded image
+image_path = r"C:/Users/Madhav/.gemini/antigravity/brain/9cc8325c-cd76-426f-85e1-e3096464cb09/uploaded_image_1767977759111.png"
+
+try:
+    print(f"Loading image from: {image_path}")
+    image = Image.open(image_path).convert("RGB")
+
+    analyzer = IdentityRiskAnalyzer()
+    print("Running analysis...")
+    results = analyzer.analyze(image)
+
+    print("\n--- ANALYSIS RESULTS ---")
+    print(f"High Risk: {results['is_high_risk']}")
+    print(f"Risk Score: {results['risk_score']}")
+
+    print("\n[Passed Criteria (Risk Factors)]:")
+    for item in results['passed_criteria']:
+        print(f"  - {item}")
+
+    print("\n[Failed Criteria / Details (Safety Factors)]:")
+    for item in results['details']:
+        print(f"  - {item}")
+
+except Exception as e:
+    print(f"Error: {e}")
frequency_analysis.py
ADDED
@@ -0,0 +1,97 @@
+import numpy as np
+import numpy as np
+from PIL import Image, ImageFilter, ImageOps
+import io
+import base64
+
+class FrequencyAnalyzer:
+    def __init__(self):
+        pass
+
+    def generate_spectrum(self, image: Image.Image) -> str:
+        """
+        Generates a 2D Frequency Spectrum (Magnitude Plot) from a PIL Image.
+        Returns a base64 encoded PNG string of the spectrum.
+        """
+        try:
+            # 1. Convert to Grayscale
+            img_gray = image.convert('L')
+
+            # 2. Convert to Numpy Array
+            img_array = np.array(img_gray)
+
+            # 3. Compute FFT (Fast Fourier Transform)
+            # f = np.fft.fft2(img_array)
+            # fshift = np.fft.fftshift(f)
+            # magnitude_spectrum = 20 * np.log(np.abs(fshift))
+
+            # Use a slightly more robust method for visualization
+            f = np.fft.fft2(img_array)
+            fshift = np.fft.fftshift(f)
+            # Add epsilon to avoid log(0)
+            magnitude_spectrum = 20 * np.log(np.abs(fshift) + 1e-9)
+
+            # 4. Normalize to 0-255 for standard image display
+            mag_min = magnitude_spectrum.min()
+            mag_max = magnitude_spectrum.max()
+
+            # Avoid division by zero if image is blank
+            if mag_max == mag_min:
+                normalized = np.zeros_like(magnitude_spectrum, dtype=np.uint8)
+            else:
+                normalized = 255 * (magnitude_spectrum - mag_min) / (mag_max - mag_min)
+                normalized = np.uint8(normalized)
+
+            # 5. Convert back to PIL Image
+            spectrum_img = Image.fromarray(normalized)
+
+            # 6. Encode to Base64
+            buffered = io.BytesIO()
+            spectrum_img.save(buffered, format="PNG")
+            img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+            return img_str
+
+        except Exception as e:
+            print(f"Error generating frequency spectrum: {e}")
+            return ""
+
+    def generate_pattern_map(self, image: Image.Image) -> str:
+        """
+        Generates a Noise Pattern Map (High-Pass/Laplacian) to visualize
+        checkerboard artifacts.
+        """
+        try:
+            # 1. Convert to Grayscale
+            img_gray = image.convert('L')
+
+            # 2. Apply Laplacian Filter (High-Pass)
+            # This removes smooth areas and keeps edges/noise
+            noise_map = img_gray.filter(ImageFilter.FIND_EDGES)
+            # Note: FIND_EDGES is a simple approximation. For better grid visualization,
+            # we can use a Kernel, but this is usually sufficient for "seeing" the grid.
+
+            # 3. Invert to make lines black on white (easier to see)
+            # noise_map = ImageOps.invert(noise_map)
+            # Actually, white on black (default) effectively shows the "glowing" grid lines.
+
+            # 4. Enhance Contrast/Brightness to make faint artifacts visible
+            # We convert to numpy to scale
+            arr = np.array(noise_map).astype(float)
+
+            # Amplify
+            arr = arr * 5.0  # Boost signal
+            arr = np.clip(arr, 0, 255).astype(np.uint8)
+
+            enhanced_map = Image.fromarray(arr)
+
+            # 5. Encode
+            buffered = io.BytesIO()
+            enhanced_map.save(buffered, format="PNG")
+            img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+            return img_str
+
+        except Exception as e:
+            print(f"Error generating pattern map: {e}")
+            return ""
identity_risk.py
ADDED
@@ -0,0 +1,128 @@
+import cv2
+import numpy as np
+import os
+
+class IdentityRiskAnalyzer:
+    def __init__(self):
+        # Load Haar Cascades
+        cascade_path = cv2.data.haarcascades
+        self.face_cascade = cv2.CascadeClassifier(os.path.join(cascade_path, 'haarcascade_frontalface_default.xml'))
+        self.eye_cascade = cv2.CascadeClassifier(os.path.join(cascade_path, 'haarcascade_eye.xml'))
+        # Smile cascade is often less reliable, but we can try if available, or skip expression strictness.
+        # self.smile_cascade = cv2.CascadeClassifier(os.path.join(cascade_path, 'haarcascade_smile.xml'))
+
+    def analyze(self, pil_image):
+        """
+        Analyzes a PIL Image for identity theft risk using OpenCV.
+        """
+        # Convert PIL to CV2 (BGR)
+        img_np = np.array(pil_image)
+        if img_np.shape[2] == 4:  # RGBA to RGB
+            img_np = cv2.cvtColor(img_np, cv2.COLOR_RGBA2RGB)
+
+        # OpenCV expects BGR
+        img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
+        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
+        h, w = gray.shape
+
+        results = {
+            "is_high_risk": False,
+            "risk_score": 0.0,
+            "details": [],
+            "passed_criteria": []
+        }
+
+        # 1. Face Visibility & Count
+        faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+        if len(faces) == 0:
+            results["details"].append("No face detected.")
+            return results
+
+        if len(faces) > 1:
+            results["details"].append("Multiple faces detected.")
+            return results
+
+        results["passed_criteria"].append("Single face visible")
+
+        # Get Face ROI
+        (x, y, fw, fh) = faces[0]
+        face_roi_gray = gray[y:y+fh, x:x+fw]
+
+        face_ratio = (fw * fh) / (w * h)
+        if face_ratio < 0.05:
+            results["details"].append("Face too small relative to image.")
+
+        # 2. Alignment (Eyes)
+        eyes = self.eye_cascade.detectMultiScale(face_roi_gray)
+
+        if len(eyes) >= 2:
+            # Sort by x position to get left and right eye
+            eyes = sorted(eyes, key=lambda e: e[0])
+            (ex1, ey1, ew1, eh1) = eyes[0]
+            (ex2, ey2, ew2, eh2) = eyes[-1]  # Farthest right
+
+            # Check Angle (Roll)
+            dy = (ey2 + eh2/2) - (ey1 + eh1/2)
+            dx = (ex2 + ew2/2) - (ex1 + ew1/2)
+            angle = np.degrees(np.arctan2(dy, dx))
+
+            if abs(angle) > 10:
+                results["details"].append(f"Face tilted (Angle: {angle:.1f}°).")
+            else:
+                results["passed_criteria"].append("Face vertically aligned")
+
+            # Check Centering (Yaw/Translation)
+            face_center_x = x + fw/2
+            img_center_x = w/2
+            deviation = abs(face_center_x - img_center_x)
+
+            if deviation > w * 0.15:
+                results["details"].append("Face not centered.")
+            else:
+                results["passed_criteria"].append("Face centered")
+        else:
+            # Can't see both eyes -> Maybe side profile or hair?
+            # For High Risk ID, we NEED eyes visible.
+            results["details"].append("Eyes not clearly visible or aligned.")
+
+        # 3. Image Quality (Blur)
+        laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
+        if laplacian_var < 50:
+            results["details"].append("Image too blurry.")
+        else:
+            results["passed_criteria"].append("High resolution/sharp")
+
+        # 4. Lighting & Background
+        margin = int(min(w, h) * 0.1)
+        if margin < 1: margin = 1
+
+        # Check top corners for background uniformity
+        top_strip = gray[0:margin, :]
+        bg_var = np.var(top_strip)
+        bg_mean = np.mean(top_strip)
+
+        if bg_var > 2500:  # Relaxed threshold for "real world" plain walls
+            results["details"].append("Background not plain/uniform.")
+        elif bg_mean < 80:  # Too dark
+            results["details"].append("Background too dark.")
+        else:
+            results["passed_criteria"].append("Plain light-colored background")
+
+        # 5. Expression (Heuristic)
+        # Without landmarks, checking "neutral" is hard.
+        # But we can skip strict Smile check as user requested leniency.
+        # We assume if it passes eye alignment and is frontal, it's risky enough.
+        results["passed_criteria"].append("Expression check skipped (Lenient)")
+
+        # Final Scoring
+        # High Risk if NO details (failures).
+
+        if len(results["details"]) == 0:
+            results["is_high_risk"] = True
+            results["risk_score"] = 0.95
+        else:
+            # Special Case: If only Background failed? No, ID photo needs plain BG.
+            results["is_high_risk"] = False
+
+        return results
inspect_dire.py
ADDED
@@ -0,0 +1,22 @@
+
+from transformers import AutoModelForImageClassification
+import sys
+
+model_name = 'yevvonlim/DistilDIRE'
+try:
+    print(f"Attempting to load {model_name}...")
+    model = AutoModelForImageClassification.from_pretrained(model_name)
+    print("Success!")
+    print("Labels:", model.config.id2label)
+except Exception as e:
+    print(f"Error loading {model_name}: {e}")
+
+# Check Organika as backup
+model_name_2 = 'Organika/SDXL-Detector'
+try:
+    print(f"Attempting to load {model_name_2}...")
+    model2 = AutoModelForImageClassification.from_pretrained(model_name_2)
+    print("Success Organika!")
+    print("Labels:", model2.config.id2label)
+except Exception as e:
+    print(f"Error loading {model_name_2}: {e}")
render.yaml
ADDED
@@ -0,0 +1,9 @@
+services:
+  - type: web
+    name: coin-toss
+    env: python
+    buildCommand: pip install -r requirements.txt
+    startCommand: gunicorn app:app
+    envVars:
+      - key: PYTHON_VERSION
+        value: 3.10.0
requirements.txt
ADDED
@@ -0,0 +1,13 @@
+flask
+transformers
+pillow
+pandas
+
+scikit-learn
+accelerate
+datasets
+evaluate
+
+opencv-python-headless
+numpy
+gunicorn
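Note that torch and torchvision are not pinned here; the Dockerfile installs them separately from the CPU wheel index. A local setup mirroring that split might look like the sketch below (the `.venv` name and the activation command are illustrative assumptions; the pip commands are taken from the Dockerfile and README).

```bash
# Create and activate a virtual environment (name is arbitrary)
python -m venv .venv && source .venv/bin/activate

# CPU-only PyTorch, as in the Dockerfile, then the remaining dependencies
pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu
pip install -r requirements.txt
```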
static/bg-animation.js
ADDED
@@ -0,0 +1,113 @@
+/**
+ * Background Particle Animation
+ * Creates a "living" grid of particles that reacts to mouse movement.
+ */
+
+const canvas = document.getElementById('bg-canvas');
+const ctx = canvas.getContext('2d');
+
+let particlesArray;
+
+// Mouse Interactions
+let mouse = {
+    x: null,
+    y: null,
+    radius: 150 // Interaction radius
+}
+
+window.addEventListener('mousemove', function (event) {
+    mouse.x = event.x;
+    mouse.y = event.y;
+});
+
+// Particle Class
+class Particle {
+    constructor(x, y, loadingSpeed, size, color) {
+        this.x = x;
+        this.y = y;
+        this.baseX = x; // Remember original position
+        this.baseY = y;
+        this.size = size;
+        this.color = color;
+        this.density = (Math.random() * 30) + 1; // How heavy/slow it moves
+    }
+
+    draw() {
+        ctx.beginPath();
+        ctx.arc(this.x, this.y, this.size, 0, Math.PI * 2, false);
+        ctx.fillStyle = this.color;
+        ctx.fill();
+    }
+
+    update() {
+        // Mouse Interaction Physics
+        let dx = mouse.x - this.x;
+        let dy = mouse.y - this.y;
+        let distance = Math.sqrt(dx * dx + dy * dy);
+        let forceDirectionX = dx / distance;
+        let forceDirectionY = dy / distance;
+        let maxDistance = mouse.radius;
+        let force = (maxDistance - distance) / maxDistance;
+        let directionX = forceDirectionX * force * this.density;
+        let directionY = forceDirectionY * force * this.density;
+
+        if (distance < mouse.radius) {
+            // Move away from mouse (Repulsion) - feels "magnetic"
+            this.x -= directionX;
+            this.y -= directionY;
+            // Option: To make it attract instead, change -= to +=
+        } else {
+            // Return to original position (Elasticity)
+            if (this.x !== this.baseX) {
+                let dx = this.x - this.baseX;
+                this.x -= dx / 10; // Speed of return
+            }
+            if (this.y !== this.baseY) {
+                let dy = this.y - this.baseY;
+                this.y -= dy / 10;
+            }
+        }
+    }
+}
+
+function init() {
+    particlesArray = [];
+    // Create a grid of particles
+    const numberOfParticles = (canvas.width * canvas.height) / 9000; // Density control
+
+    // Random distribution (Starfield style)
+    for (let i = 0; i < numberOfParticles * 2; i++) {
+        let size = (Math.random() * 2) + 0.5; // Random size
+        let x = Math.random() * innerWidth;
+        let y = Math.random() * innerHeight;
+        let color = '#00DC82'; // Brand Green
+
+        particlesArray.push(new Particle(x, y, 1, size, color));
+    }
+}
+
+function animate() {
+    requestAnimationFrame(animate);
+    ctx.clearRect(0, 0, innerWidth, innerHeight);
+
+    for (let i = 0; i < particlesArray.length; i++) {
+        particlesArray[i].draw();
+        particlesArray[i].update();
+    }
+    // Connect particles with lines (Constellation effect) - Optional, can be heavy
+    // connect();
+}
+
+// Handle Resize
+window.addEventListener('resize', function () {
+    canvas.width = innerWidth;
+    canvas.height = innerHeight;
+    mouse.radius = ((canvas.height / 80) * (canvas.height / 80));
+    init();
+});
+
+// Start
+canvas.width = innerWidth;
+canvas.height = innerHeight;
+init();
+animate();
static/script.js
ADDED
@@ -0,0 +1,405 @@
+document.addEventListener('DOMContentLoaded', () => {
+    const dropZone = document.getElementById('drop-zone');
+    const fileInput = document.getElementById('file-input');
+    const previewContainer = document.getElementById('preview-container');
+    const imagePreview = document.getElementById('image-preview');
+    const removeBtn = document.getElementById('remove-btn');
+    const analyzeBtn = document.getElementById('analyze-btn');
+    const resultContainer = document.getElementById('result-container');
+    const resultBadge = document.getElementById('result-badge');
+
+    const tabDetector = document.getElementById('tab-detector');
+    const tabRisk = document.getElementById('tab-risk');
+    let currentMode = 'detector'; // 'detector' or 'risk'
+
+    const modelLabel = document.getElementById('model-label');
+    const btnText = analyzeBtn.querySelector('.btn-text');
+    const loader = analyzeBtn.querySelector('.loader');
+
+    let currentFile = null;
+
+    // --- Tab Switching ---
+    function switchTab(mode) {
+        currentMode = mode;
+        resultContainer.classList.add('hidden'); // Hide previous results
+
+        if (mode === 'detector') {
+            tabDetector.className = 'px-6 py-2 rounded-full font-medium transition-all duration-300 bg-white text-black shadow-[0_0_20px_-5px_rgba(255,255,255,0.5)]';
+            tabRisk.className = 'px-6 py-2 rounded-full font-medium transition-all duration-300 bg-white/5 text-brand-gray hover:bg-white/10 hover:text-white border border-white/10';
+            btnText.textContent = 'Analyze Authenticity';
+        } else {
+            tabRisk.className = 'px-6 py-2 rounded-full font-medium transition-all duration-300 bg-brand-green text-black shadow-[0_0_20px_-5px_rgba(0,220,130,0.5)]';
+            tabDetector.className = 'px-6 py-2 rounded-full font-medium transition-all duration-300 bg-white/5 text-brand-gray hover:bg-white/10 hover:text-white border border-white/10';
+            btnText.textContent = 'Check Identity Risk';
+        }
+    }
+
+    tabDetector.addEventListener('click', () => switchTab('detector'));
+    tabRisk.addEventListener('click', () => switchTab('risk'));
+
+
+    // --- Drag & Drop ---
+    dropZone.addEventListener('click', () => fileInput.click());
+
+    dropZone.addEventListener('dragover', (e) => {
+        e.preventDefault();
+        dropZone.classList.add('dragover');
+    });
+
+    dropZone.addEventListener('dragleave', () => {
+        dropZone.classList.remove('dragover');
+    });
+
+    dropZone.addEventListener('drop', (e) => {
+        e.preventDefault();
+        dropZone.classList.remove('dragover');
+        if (e.dataTransfer.files.length) {
+            handleFile(e.dataTransfer.files[0]);
+        }
+    });
+
+    fileInput.addEventListener('change', (e) => {
+        if (e.target.files.length) {
+            handleFile(e.target.files[0]);
+        }
+    });
+
+    // --- File Handling ---
+    function handleFile(file) {
+        if (!file.type.startsWith('image/')) {
+            alert('Please upload an image file (JPG, PNG).');
+            return;
+        }
+
+        currentFile = file;
+        const reader = new FileReader();
+        reader.onload = (e) => {
+            imagePreview.src = e.target.result;
+            dropZone.classList.add('hidden');
+            previewContainer.classList.remove('hidden');
+            analyzeBtn.disabled = false;
+            resultContainer.classList.add('hidden');
+        };
+        reader.readAsDataURL(file);
+    }
+
+    // --- Reset ---
+    removeBtn.addEventListener('click', () => {
+        currentFile = null;
+        fileInput.value = '';
+        dropZone.classList.remove('hidden');
+        previewContainer.classList.add('hidden');
+        analyzeBtn.disabled = true;
+        resultContainer.classList.add('hidden');
+    });
+
+    // --- Prediction ---
+    analyzeBtn.addEventListener('click', async () => {
+        if (!currentFile) return;
+
+        // UI Loading State
+        analyzeBtn.disabled = true;
+        btnText.textContent = currentMode === 'detector' ? 'Analysing...' : 'Checking Risk...';
+        loader.classList.remove('hidden');
+        resultContainer.classList.add('hidden');
+
+        const formData = new FormData();
+        formData.append('image', currentFile);
+
+        try {
+            const response = await fetch('/predict', {
+                method: 'POST',
+                body: formData
+            });
+
+            if (!response.ok) throw new Error('API Error');
+
+            const data = await response.json();
+            displayResult(data);
+
+        } catch (error) {
+            console.error(error);
+            alert('Error processing image. Please try again.');
+        } finally {
+            // UI Reset State
+            analyzeBtn.disabled = false;
+            btnText.textContent = currentMode === 'detector' ? 'Analyze Authenticity' : 'Check Identity Risk';
+            loader.classList.add('hidden');
+        }
+    });
+
+    function displayResult(data) {
+        resultContainer.classList.remove('hidden');
+        const badgeContainer = document.getElementById('result-badge-container');
+
+        // Remove existing detailed warnings/lists
+        const existingWarning = document.getElementById('risk-warning');
+        if (existingWarning) existingWarning.remove();
+        const existingList = document.getElementById('risk-details');
+        if (existingList) existingList.remove();
+
+
+        const confidenceWrapper = document.getElementById('confidence-wrapper');
+        const explanationsSection = document.getElementById('explanations-section');
+        const qualificationTag = document.getElementById('qualification-tag'); // [NEW]
+
+        if (currentMode === 'detector') {
+            // --- AI Detector Mode ---
+            if (confidenceWrapper) confidenceWrapper.classList.remove('hidden'); // Show in Detector Mode
+
+            modelLabel.textContent = 'AI Detection Result';
+            const label = data.prediction.toLowerCase();
+            resultBadge.textContent = label.toUpperCase();
+
+            // [NEW] Qualification Tag Logic
+            if (qualificationTag) {
+                if (data.classification_tag) {
+                    qualificationTag.classList.remove('hidden');
+                    qualificationTag.textContent = data.classification_tag;
+
+                    // Dynamic Styling based on content
+                    qualificationTag.className = "px-3 py-1 rounded-full text-[10px] font-bold uppercase tracking-wider border transition-colors duration-300";
+
+                    if (data.classification_tag.includes("AI") || data.classification_tag.includes("Manipulation")) {
+                        qualificationTag.classList.add("bg-red-500/10", "border-red-500/20", "text-red-200");
+                    } else if (data.classification_tag.includes("Raw")) {
+                        qualificationTag.classList.add("bg-brand-green/10", "border-brand-green/20", "text-brand-green");
+                    } else {
+                        qualificationTag.classList.add("bg-yellow-500/10", "border-yellow-500/20", "text-yellow-200");
+                    }
+                } else {
+                    qualificationTag.classList.add('hidden');
+                }
+            }
+
+            // Confidence Update
+            const confidenceBar = document.getElementById('confidence-bar');
+            const confidenceValue = document.getElementById('confidence-value');
+            const percent = Math.round(data.confidence * 100);
+
+            // Animate Bar
+            setTimeout(() => {
+                confidenceBar.style.width = `${percent}%`;
+            }, 100);
+            confidenceValue.textContent = `${percent}%`;
+
+            resultBadge.className = 'text-3xl font-bold tracking-tight transition-colors duration-300';
+            confidenceBar.className = 'h-full w-0 rounded-full transition-all duration-1000 ease-out'; // Reset base
+
+            if (label.includes('real')) {
+                resultBadge.classList.add('text-brand-green', 'drop-shadow-[0_0_15px_rgba(0,220,130,0.5)]');
+                confidenceBar.classList.add('bg-brand-green', 'shadow-[0_0_10px_rgba(0,220,130,0.5)]');
+            } else if (label.includes('fake') || label.includes('ai')) {
+                resultBadge.classList.add('text-red-500', 'drop-shadow-[0_0_15px_rgba(255,50,50,0.5)]');
+                confidenceBar.classList.add('bg-red-500', 'shadow-[0_0_10px_rgba(255,50,50,0.5)]');
+            } else {
+                resultBadge.classList.add('text-brand-gray');
+                confidenceBar.classList.add('bg-gray-500');
+            }
+
+            // [NEW] Analysis Points Logic
+            const explanationsSection = document.getElementById('explanations-section');
+            const reasonsList = document.getElementById('reasons-list');
+
+            reasonsList.innerHTML = ''; // Clear previous
+
+            if (data.analysis_points && data.analysis_points.length > 0) {
+                explanationsSection.classList.remove('hidden');
+                data.analysis_points.forEach(point => {
+                    const li = document.createElement('li');
+                    li.className = 'text-gray-300 text-sm flex items-start gap-2.5 leading-relaxed';
+                    li.innerHTML = `
+                        <i class="fa-solid fa-circle-check text-brand-green text-[10px] mt-1.5 opacity-80"></i>
+                        <span class="opacity-90">${point}</span>
+                    `;
+                    // Change icon for AI points
+                    if (label.includes('ai')) {
+                        li.innerHTML = `
+                            <i class="fa-solid fa-circle-exclamation text-red-500 text-[10px] mt-1.5 opacity-80"></i>
+                            <span class="opacity-90">${point}</span>
+                        `;
+                    }
+                    reasonsList.appendChild(li);
+                });
+            } else {
+                explanationsSection.classList.add('hidden');
+            }
+
+            // Old Warning for "Real" images in Detection Mode (optional, keeping minimal as per request)
+            if (data.risk_analysis && data.risk_analysis.is_high_risk && label.includes('real')) {
+                const warningDiv = document.createElement('div');
+                warningDiv.id = 'risk-warning';
+                warningDiv.className = 'mt-4 p-3 rounded-xl bg-orange-500/10 border border-orange-500/20 backdrop-blur-sm animate-fade-in-up';
+                warningDiv.innerHTML = `
+                    <div class="flex items-center gap-3">
+                        <i class="fa-solid fa-triangle-exclamation text-orange-500"></i>
+                        <p class="text-orange-200/80 text-xs">
+                            Identity Risk Detected. Switch to the <b>Identity Risk</b> tab for details.
+                        </p>
+                    </div>
+                `;
+                const resultCard = resultContainer.querySelector('div');
+                resultCard.appendChild(warningDiv);
+            }
+
+        } else {
+            // --- Identity Risk Mode ---
+            modelLabel.textContent = 'Identity Theft Risk Analysis';
+
+            // Hide Detector-specific elements
+            if (confidenceWrapper) confidenceWrapper.classList.add('hidden');
+            if (explanationsSection) explanationsSection.classList.add('hidden');
+
+            // Check if analysis actually ran
+            if (data.prediction !== "Real") {
+                // It's AI, so no risk analysis
+                resultBadge.textContent = "N/A (AI Detected)";
+                resultBadge.className = 'text-xl font-semibold text-brand-gray';
+
+                const infoDiv = document.createElement('div');
+                infoDiv.id = 'risk-details';
+                infoDiv.className = 'mt-4 text-brand-gray/60 text-sm text-center';
+                infoDiv.textContent = "Identity risk analysis is only performed on real images.";
+
+                const resultCard = resultContainer.querySelector('div');
+                resultCard.appendChild(infoDiv);
+                return;
+            }
+
+            if (!data.risk_analysis || data.risk_analysis.error) {
+                resultBadge.textContent = "Error";
+                return;
+            }
+
+            const isHighRisk = data.risk_analysis.is_high_risk;
+            resultBadge.textContent = isHighRisk ? "HIGH RISK" : "LOW RISK";
+
+            resultBadge.className = 'text-3xl font-bold tracking-tight transition-colors duration-300';
+            if (isHighRisk) {
+                resultBadge.classList.add('text-red-500', 'drop-shadow-[0_0_15px_rgba(255,50,50,0.5)]');
+            } else {
+                resultBadge.classList.add('text-brand-green', 'drop-shadow-[0_0_15px_rgba(0,220,130,0.5)]');
+            }
+
+            // Create Detailed List
+            const detailsDiv = document.createElement('div');
+            detailsDiv.id = 'risk-details';
+            detailsDiv.className = 'mt-6 pt-4 border-t border-white/10 animate-fade-in-up';
+
+            let html = '<div class="space-y-3">';
+
+            // Passed Criteria (Green Check) - Logic: "Passed" means it matched the ID criteria (so it IS risky)
+            // The passed_criteria list means "Matches ID standard",
+            // so an item in passed_criteria contributes to High Risk.
+
+            const passed = data.risk_analysis.passed_criteria || [];
+            const failed = data.risk_analysis.details || []; // Reasons why it FAILED to be an ID photo (so good for safety)
+
+            // Show "Risk Factors" (The things that make it ID-like)
+            if (passed.length > 0) {
+                html += '<h5 class="text-xs font-semibold text-brand-gray uppercase mb-2">Risk Factors (ID Compliant)</h5>';
+                passed.forEach(item => {
+                    html += `
+                        <div class="flex items-center gap-2 text-sm text-red-200/80">
+                            <i class="fa-solid fa-check text-red-500"></i>
+                            <span>${item}</span>
+                        </div>`;
+                });
+            }
+
+            // Show "Safety Factors" (The things that make it SAFE / Not ID-like)
+            if (failed.length > 0) {
+                html += '<h5 class="text-xs font-semibold text-brand-gray uppercase mt-4 mb-2">Safety Factors (Non-ID)</h5>';
+                failed.forEach(item => {
+                    html += `
+                        <div class="flex items-center gap-2 text-sm text-brand-green/80">
+                            <i class="fa-solid fa-shield text-brand-green"></i>
+                            <span>${item}</span>
+                        </div>`;
+                });
+            }
+
+            html += '</div>';
+            detailsDiv.innerHTML = html;
+
+
+
+            const resultCard = resultContainer.querySelector('div');
+            resultCard.appendChild(detailsDiv);
+        }
+
+        // --- Frequency Map Logic (Shared) ---
+        // This runs after the mode-specific logic
+
+        // --- Advanced Analysis Logic (Tabs + Maps) ---
+        const advancedToggleContainer = document.getElementById('advanced-toggle-container');
+        const advancedSection = document.getElementById('advanced-analysis-section');
+        const advancedBtn = document.getElementById('advanced-btn');
+
+        const frequencyMapImg = document.getElementById('frequency-map-img');
+        const patternMapImg = document.getElementById('pattern-map-img');
+
+        const tabFreq = document.getElementById('tab-freq');
+        const tabPattern = document.getElementById('tab-pattern');
+        const panelFreq = document.getElementById('panel-freq');
+        const panelPattern = document.getElementById('panel-pattern');
+
+        if (advancedToggleContainer && advancedSection) {
+            // 1. Reset State
+            advancedToggleContainer.classList.add('hidden');
+            advancedSection.classList.add('hidden');
+            advancedBtn.innerHTML = '<i class="fa-solid fa-chart-simple mr-2"></i>View Advanced Frequency Analysis';
+
+            // 2. Check if we have data to show
+            if (currentMode === 'detector' && (data.frequency_analysis || data.pattern_analysis)) {
+                advancedToggleContainer.classList.remove('hidden');
+
+                // Populate Images
+                if (data.frequency_analysis && frequencyMapImg) frequencyMapImg.src = 'data:image/png;base64,' + data.frequency_analysis;
+                if (data.pattern_analysis && patternMapImg) patternMapImg.src = 'data:image/png;base64,' + data.pattern_analysis;
+
+                // 3. Toggle Section Visibility
+                const newBtn = advancedBtn.cloneNode(true);
+                advancedBtn.parentNode.replaceChild(newBtn, advancedBtn);
+
+                newBtn.addEventListener('click', () => {
+                    const isHidden = advancedSection.classList.contains('hidden');
+                    if (isHidden) {
+                        advancedSection.classList.remove('hidden');
+                        newBtn.innerHTML = '<i class="fa-solid fa-chevron-up mr-2"></i>Hide Advanced Analysis';
+                    } else {
+                        advancedSection.classList.add('hidden');
+                        newBtn.innerHTML = '<i class="fa-solid fa-chart-simple mr-2"></i>View Advanced Frequency Analysis';
+                    }
+                });
+
+                // 4. Tab Switching Logic
+                // Update References & remove old listeners
+                const newTabFreq = tabFreq.cloneNode(true);
+                if (tabFreq) tabFreq.parentNode.replaceChild(newTabFreq, tabFreq);
+
+                const newTabPattern = tabPattern.cloneNode(true);
+                if (tabPattern) tabPattern.parentNode.replaceChild(newTabPattern, tabPattern);
+
+                const setActive = (activeTab, activePanel) => {
+                    // Reset all
+                    [newTabFreq, newTabPattern].forEach(t => {
+                        if (t) t.className = "relative px-4 py-2 text-xs font-medium text-brand-gray bg-black/40 rounded-t-lg transition-colors border-t border-x border-white/5 hover:bg-white/5 hover:text-white cursor-pointer";
+                    });
+                    if (panelFreq) panelFreq.classList.add('hidden');
+                    if (panelPattern) panelPattern.classList.add('hidden');
+
+                    // Set Active
+                    if (activeTab) activeTab.className = "relative px-4 py-2 text-xs font-medium text-white bg-white/10 rounded-t-lg transition-colors border-t border-x border-white/10 hover:bg-white/20 z-10 cursor-default";
+                    if (activePanel) activePanel.classList.remove('hidden');
+                };
+
+                if (newTabFreq) newTabFreq.addEventListener('click', () => setActive(newTabFreq, panelFreq));
+                if (newTabPattern) newTabPattern.addEventListener('click', () => setActive(newTabPattern, panelPattern));
|
| 399 |
+
|
| 400 |
+
// Default to Freq
|
| 401 |
+
setActive(newTabFreq, panelFreq);
|
| 402 |
+
}
|
| 403 |
+
}
|
| 404 |
+
}
|
| 405 |
+
});
|
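Note on the JSON contract consumed by script.js above: the frontend reads risk_analysis.is_high_risk, risk_analysis.passed_criteria and risk_analysis.details in Identity Risk mode, and frequency_analysis / pattern_analysis as base64 PNG strings in detector mode. The Flask sketch below mirrors those field names for reference only; the route name and example values are assumptions, not the actual app.py handler.

# Sketch only: field names mirror what static/script.js reads; the route and
# values are hypothetical placeholders, not the real app.py implementation.
from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/predict", methods=["POST"])  # route name is an assumption
def predict():
    return jsonify({
        "risk_analysis": {                        # used in "Identity Risk" mode
            "is_high_risk": True,                 # drives the HIGH/LOW RISK badge
            "passed_criteria": ["Frontal face"],  # rendered as "Risk Factors"
            "details": ["Busy background"],       # rendered as "Safety Factors"
        },
        "frequency_analysis": "<base64 PNG>",     # "Frequency Map" tab (detector mode)
        "pattern_analysis": "<base64 PNG>",       # "Noise Pattern" tab (detector mode)
    })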
static/style.css
ADDED
|
@@ -0,0 +1,2 @@
|
| 1 |
+
/* All styles are now handled by Tailwind CSS in index.html */
|
| 2 |
+
/* This file is kept to prevent 404s if referenced, but is intentionally empty. */
|
templates/index.html
ADDED
|
@@ -0,0 +1,358 @@
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
|
| 4 |
+
<head>
|
| 5 |
+
<meta charset="UTF-8">
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
+
<title>V-VORTEX | AI Detection</title>
|
| 8 |
+
|
| 9 |
+
<!-- Google Fonts -->
|
| 10 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 11 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 12 |
+
<link href="https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;500;600;700&display=swap" rel="stylesheet">
|
| 13 |
+
|
| 14 |
+
<!-- FontAwesome -->
|
| 15 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
|
| 16 |
+
|
| 17 |
+
<!-- Tailwind CSS -->
|
| 18 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
| 19 |
+
<script>
|
| 20 |
+
tailwind.config = {
|
| 21 |
+
theme: {
|
| 22 |
+
extend: {
|
| 23 |
+
fontFamily: {
|
| 24 |
+
sans: ['Outfit', 'sans-serif'],
|
| 25 |
+
},
|
| 26 |
+
colors: {
|
| 27 |
+
brand: {
|
| 28 |
+
dark: '#02040a',
|
| 29 |
+
green: '#00DC82',
|
| 30 |
+
gray: '#8899ac'
|
| 31 |
+
}
|
| 32 |
+
},
|
| 33 |
+
backgroundImage: {
|
| 34 |
+
'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))',
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
</script>
|
| 40 |
+
|
| 41 |
+
<!-- Custom CSS (Only for what Tailwind can't easily do) -->
|
| 42 |
+
<link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
|
| 43 |
+
</head>
|
| 44 |
+
|
| 45 |
+
<body
|
| 46 |
+
class="bg-brand-dark text-white min-h-screen flex flex-col font-sans overflow-x-hidden selection:bg-brand-green selection:text-brand-dark relative">
|
| 47 |
+
|
| 48 |
+
<!-- Ambient Background Glows -->
|
| 49 |
+
<div class="fixed top-0 left-0 w-full h-full overflow-hidden pointer-events-none z-0">
|
| 50 |
+
<canvas id="bg-canvas" class="absolute top-0 left-0 w-full h-full opacity-40"></canvas>
|
| 51 |
+
<div class="absolute top-[-20%] left-[-10%] w-[800px] h-[800px] bg-brand-green/5 rounded-full blur-[120px]">
|
| 52 |
+
</div>
|
| 53 |
+
<div class="absolute bottom-[-10%] right-[-5%] w-[600px] h-[600px] bg-blue-600/5 rounded-full blur-[100px]">
|
| 54 |
+
</div>
|
| 55 |
+
</div>
|
| 56 |
+
|
| 57 |
+
<!-- Header -->
|
| 58 |
+
<header class="relative z-10 w-full max-w-7xl mx-auto px-6 py-8 flex justify-between items-center">
|
| 59 |
+
<div class="flex items-center gap-2">
|
| 60 |
+
<div class="w-8 h-8 rounded bg-brand-green flex items-center justify-center text-brand-dark font-bold">
|
| 61 |
+
<i class="fa-solid fa-coins"></i>
|
| 62 |
+
</div>
|
| 63 |
+
<span class="font-bold text-lg tracking-wide">COIN TOSS</span>
|
| 64 |
+
</div>
|
| 65 |
+
|
| 66 |
+
<a href="https://github.com/madhavmullick2025/COIN-TOSS" target="_blank"
|
| 67 |
+
class="text-brand-gray hover:text-white transition-colors">
|
| 68 |
+
<i class="fa-brands fa-github text-xl"></i>
|
| 69 |
+
</a>
|
| 70 |
+
</header>
|
| 71 |
+
|
| 72 |
+
<main class="relative z-10 flex-1 flex flex-col items-center justify-center px-4 mb-20">
|
| 73 |
+
|
| 74 |
+
<!-- Hero Section -->
|
| 75 |
+
<div class="text-center mb-12 animate-fade-in-up">
|
| 76 |
+
<div
|
| 77 |
+
class="inline-flex items-center gap-2 px-4 py-1.5 rounded-full bg-white/5 border border-white/10 mb-8 backdrop-blur-sm">
|
| 78 |
+
<i class="fa-solid fa-bolt text-brand-green text-xs"></i>
|
| 79 |
+
<span class="text-xs font-semibold text-brand-green tracking-wider uppercase">Powered by
|
| 80 |
+
UniversalFakeDetect & DIRE</span>
|
| 81 |
+
</div>
|
| 82 |
+
|
| 83 |
+
<div class="relative">
|
| 84 |
+
<h1 class="text-5xl md:text-7xl font-bold mb-4 leading-tight tracking-tight">
|
| 85 |
+
See beyond <br>
|
| 86 |
+
<span class="bg-gradient-to-r from-brand-green to-emerald-600 bg-clip-text text-transparent">the
|
| 87 |
+
pixels.</span>
|
| 88 |
+
</h1>
|
| 89 |
+
</div>
|
| 90 |
+
|
| 91 |
+
<p class="text-brand-gray max-w-xl mx-auto text-lg leading-relaxed">
|
| 92 |
+
Detect AI-generated images with precision. Visualize the artifacts that give them away through advanced
|
| 93 |
+
attention mapping and forensic analysis.
|
| 94 |
+
</p>
|
| 95 |
+
</div>
|
| 96 |
+
|
| 97 |
+
<!-- Tabs -->
|
| 98 |
+
<div class="flex items-center gap-4 mb-8">
|
| 99 |
+
<button id="tab-detector"
|
| 100 |
+
class="px-6 py-2 rounded-full font-medium transition-all duration-300 bg-white text-black shadow-[0_0_20px_-5px_rgba(255,255,255,0.5)]">
|
| 101 |
+
AI Detector
|
| 102 |
+
</button>
|
| 103 |
+
<button id="tab-risk"
|
| 104 |
+
class="px-6 py-2 rounded-full font-medium transition-all duration-300 bg-white/5 text-brand-gray hover:bg-white/10 hover:text-white border border-white/10">
|
| 105 |
+
Identity Risk
|
| 106 |
+
</button>
|
| 107 |
+
</div>
|
| 108 |
+
|
| 109 |
+
<!-- Upload Card -->
|
| 110 |
+
<div class="w-full max-w-[580px] perspective-1000">
|
| 111 |
+
<div
|
| 112 |
+
class="relative group bg-white/[0.03] hover:bg-white/[0.05] border border-white/10 rounded-3xl p-2 transition-all duration-300 backdrop-blur-xl shadow-2xl shadow-black/50">
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
<div id="drop-zone"
|
| 116 |
+
class="relative rounded-2xl border border-dashed border-white/20 bg-black/20 h-[300px] flex flex-col items-center justify-center transition-all duration-300 group-hover:border-brand-green/30 cursor-pointer overflow-hidden">
|
| 117 |
+
|
| 118 |
+
<input type="file" id="file-input" accept="image/*" class="hidden">
|
| 119 |
+
|
| 120 |
+
<div
|
| 121 |
+
class="upload-content text-center p-8 transition-transform duration-300 group-hover:-translate-y-2">
|
| 122 |
+
<div
|
| 123 |
+
class="w-16 h-16 rounded-full bg-white/5 flex items-center justify-center mx-auto mb-6 border border-white/10 group-hover:scale-110 group-hover:border-brand-green/50 transition-all duration-300">
|
| 124 |
+
<i class="fa-solid fa-arrow-up-from-bracket text-brand-green text-xl"></i>
|
| 125 |
+
</div>
|
| 126 |
+
<h3 class="text-white font-semibold text-xl mb-2">Drag & Drop or <span
|
| 127 |
+
class="text-brand-green">Browse</span></h3>
|
| 128 |
+
<p class="text-brand-gray text-sm">Supported formats: JPG, PNG, WEBP</p>
|
| 129 |
+
|
| 130 |
+
<button
|
| 131 |
+
class="mt-6 px-6 py-2.5 rounded-full bg-white/10 hover:bg-white/20 text-sm font-medium transition-all text-white border border-white/5">
|
| 132 |
+
Choose File
|
| 133 |
+
</button>
|
| 134 |
+
</div>
|
| 135 |
+
|
| 136 |
+
<!-- Overlay for drag hover -->
|
| 137 |
+
<div
|
| 138 |
+
class="absolute inset-0 bg-brand-green/10 opacity-0 group-hover:opacity-100 transition-opacity pointer-events-none flex items-center justify-center">
|
| 139 |
+
</div>
|
| 140 |
+
</div>
|
| 141 |
+
|
| 142 |
+
<!-- Preview Container (Initially Hidden) -->
|
| 143 |
+
<div id="preview-container"
|
| 144 |
+
class="hidden relative h-[300px] w-full rounded-2xl overflow-hidden bg-black/40">
|
| 145 |
+
<img id="image-preview" src="" alt="Preview" class="w-full h-full object-contain p-4" />
|
| 146 |
+
<button id="remove-btn"
|
| 147 |
+
class="absolute top-4 right-4 w-8 h-8 rounded-full bg-black/50 hover:bg-red-500/80 text-white flex items-center justify-center backdrop-blur-md transition-colors border border-white/10">
|
| 148 |
+
<i class="fa-solid fa-xmark text-sm"></i>
|
| 149 |
+
</button>
|
| 150 |
+
</div>
|
| 151 |
+
</div>
|
| 152 |
+
|
| 153 |
+
<!-- Analyze Button -->
|
| 154 |
+
<div class="mt-8 flex justify-center">
|
| 155 |
+
<button id="analyze-btn" disabled
|
| 156 |
+
class="group relative px-8 py-4 rounded-full bg-white text-black font-semibold text-lg flex items-center gap-3 transition-all hover:scale-105 disabled:opacity-50 disabled:hover:scale-100 disabled:cursor-not-allowed shadow-[0_0_40px_-10px_rgba(255,255,255,0.3)]">
|
| 157 |
+
<span class="btn-text">Analyze Image</span>
|
| 158 |
+
<i
|
| 159 |
+
class="fa-solid fa-wand-magic-sparkles text-brand-dark group-hover:rotate-12 transition-transform"></i>
|
| 160 |
+
|
| 161 |
+
<!-- Loader -->
|
| 162 |
+
<span
|
| 163 |
+
class="loader hidden w-5 h-5 border-2 border-brand-dark/30 border-t-brand-dark rounded-full animate-spin ml-2"></span>
|
| 164 |
+
</button>
|
| 165 |
+
</div>
|
| 166 |
+
</div>
|
| 167 |
+
|
| 168 |
+
<!-- Result Section -->
|
| 169 |
+
<div id="result-container" class="hidden mt-12 w-full max-w-[580px] animate-fade-in-up">
|
| 170 |
+
<div class="relative bg-white/[0.03] border border-white/10 rounded-2xl p-6 backdrop-blur-md">
|
| 171 |
+
<div class="flex items-center justify-between mb-2">
|
| 172 |
+
<span class="text-brand-gray text-sm font-medium uppercase tracking-wider">Detection Result</span>
|
| 173 |
+
<span id="model-label" class="text-xs text-brand-gray/50">--</span>
|
| 174 |
+
</div>
|
| 175 |
+
|
| 176 |
+
<div class="flex items-center gap-4">
|
| 177 |
+
<div id="result-badge-container" class="flex-1 flex flex-row items-center justify-between gap-2">
|
| 178 |
+
<h2 id="result-badge" class="text-3xl font-bold text-white tracking-tight">--</h2>
|
| 179 |
+
<div id="qualification-tag"
|
| 180 |
+
class="hidden px-3 py-1 rounded-full text-[10px] font-bold uppercase tracking-wider border border-white/10 bg-white/5 text-brand-gray/80">
|
| 181 |
+
--
|
| 182 |
+
</div>
|
| 183 |
+
</div>
|
| 184 |
+
</div>
|
| 185 |
+
|
| 186 |
+
<!-- Confidence Meter -->
|
| 187 |
+
<div id="confidence-wrapper" class="mt-6 mb-2">
|
| 188 |
+
<div class="flex justify-between items-center mb-2">
|
| 189 |
+
<span class="text-xs font-medium text-brand-gray uppercase tracking-wider">Confidence
|
| 190 |
+
Level</span>
|
| 191 |
+
<span id="confidence-value" class="text-sm font-bold text-white">0%</span>
|
| 192 |
+
</div>
|
| 193 |
+
<div class="w-full h-2 bg-white/10 rounded-full overflow-hidden">
|
| 194 |
+
<div id="confidence-bar"
|
| 195 |
+
class="h-full w-0 rounded-full transition-all duration-1000 ease-out bg-brand-green"></div>
|
| 196 |
+
</div>
|
| 197 |
+
</div>
|
| 198 |
+
|
| 199 |
+
</div>
|
| 200 |
+
|
| 201 |
+
<!-- Analysis Insights -->
|
| 202 |
+
<div id="explanations-section" class="hidden mt-6 pt-6 border-t border-white/10">
|
| 203 |
+
<h3 class="text-sm font-semibold text-brand-gray mb-4 uppercase tracking-wider flex items-center gap-2">
|
| 204 |
+
<i class="fa-solid fa-microscope text-brand-green"></i> Analysis Insights
|
| 205 |
+
</h3>
|
| 206 |
+
<ul id="reasons-list" class="space-y-3">
|
| 207 |
+
<!-- Points injected via JS -->
|
| 208 |
+
</ul>
|
| 209 |
+
</div>
|
| 210 |
+
|
| 211 |
+
<!-- Advanced Analysis -->
|
| 212 |
+
<div id="advanced-toggle-container" class="mt-8 text-center hidden mb-6 relative z-10">
|
| 213 |
+
<button id="advanced-btn"
|
| 214 |
+
class="text-brand-gray/60 hover:text-white text-xs font-medium tracking-wide transition-colors duration-300 border border-white/10 px-4 py-2 rounded-lg hover:bg-white/5 backdrop-blur-sm">
|
| 215 |
+
<i class="fa-solid fa-chart-simple mr-2"></i>View Advanced Frequency Analysis
|
| 216 |
+
</button>
|
| 217 |
+
</div>
|
| 218 |
+
|
| 219 |
+
<div id="advanced-analysis-section"
|
| 220 |
+
class="hidden animate-fade-in-up mb-8 relative z-10 w-full max-w-[580px]">
|
| 221 |
+
|
| 222 |
+
<!-- Chrome-style Tabs -->
|
| 223 |
+
<div class="flex items-end px-2 space-x-1 overflow-x-auto">
|
| 224 |
+
<!-- Frequency Tab (Active) -->
|
| 225 |
+
<button id="tab-freq"
|
| 226 |
+
class="relative px-4 py-2 text-xs font-medium text-white bg-white/10 rounded-t-lg transition-colors border-t border-x border-white/10 hover:bg-white/20 z-10">
|
| 227 |
+
<div class="flex items-center gap-2">
|
| 228 |
+
<i class="fa-solid fa-wave-square text-purple-400"></i>
|
| 229 |
+
Frequency Map
|
| 230 |
+
</div>
|
| 231 |
+
</button>
|
| 232 |
+
<!-- Pattern Tab (Inactive) -->
|
| 233 |
+
<button id="tab-pattern"
|
| 234 |
+
class="relative px-4 py-2 text-xs font-medium text-brand-gray bg-black/40 rounded-t-lg transition-colors border-t border-x border-white/5 hover:bg-white/5 hover:text-white">
|
| 235 |
+
<div class="flex items-center gap-2">
|
| 236 |
+
<i class="fa-solid fa-border-all text-blue-400"></i>
|
| 237 |
+
Noise Pattern
|
| 238 |
+
</div>
|
| 239 |
+
</button>
|
| 240 |
+
<!-- Spacer line -->
|
| 241 |
+
<div class="flex-1 border-b border-white/10 h-full"></div>
|
| 242 |
+
</div>
|
| 243 |
+
|
| 244 |
+
<!-- Content Container -->
|
| 245 |
+
<div
|
| 246 |
+
class="bg-black/40 rounded-b-xl rounded-tr-xl border border-white/10 p-5 backdrop-blur-md relative -top-[1px] z-0">
|
| 247 |
+
|
| 248 |
+
<!-- TAB 1: Frequency map -->
|
| 249 |
+
<div id="panel-freq" class="block">
|
| 250 |
+
<div class="flex items-center justify-between mb-4">
|
| 251 |
+
<h3 class="text-white text-xs font-semibold">Frequency Spectrum (FFT)</h3>
|
| 252 |
+
<span
|
| 253 |
+
class="text-[10px] text-brand-gray bg-white/5 px-2 py-1 rounded border border-white/5">Mathematical
|
| 254 |
+
Structure</span>
|
| 255 |
+
</div>
|
| 256 |
+
|
| 257 |
+
<div class="grid grid-cols-1 gap-6">
|
| 258 |
+
<div
|
| 259 |
+
class="relative group flex justify-center bg-black/50 rounded-lg p-4 border border-white/5">
|
| 260 |
+
<img id="frequency-map-img" src="" alt="Frequency Spectrum"
|
| 261 |
+
class="w-64 h-64 object-contain rounded-lg shadow-2xl">
|
| 262 |
+
</div>
|
| 263 |
+
<!-- FFT Legend -->
|
| 264 |
+
<div class="grid grid-cols-2 gap-3">
|
| 265 |
+
<div
|
| 266 |
+
class="p-3 rounded-lg bg-white/5 border border-white/5 flex flex-col items-center text-center">
|
| 267 |
+
<div class="w-2 h-2 bg-white rounded-full shadow-[0_0_5px_white] mb-2"></div>
|
| 268 |
+
<p class="text-[10px] font-bold text-white mb-1">Authentic</p>
|
| 269 |
+
<p class="text-[10px] text-brand-gray/60 leading-tight">Single center starburst.</p>
|
| 270 |
+
</div>
|
| 271 |
+
<div
|
| 272 |
+
class="p-3 rounded-lg bg-white/5 border border-white/5 flex flex-col items-center text-center">
|
| 273 |
+
<div class="grid grid-cols-3 gap-0.5 mb-2 opacity-80 scale-75">
|
| 274 |
+
<div class="w-0.5 h-0.5 bg-red-400"></div>
|
| 275 |
+
<div class="w-0.5 h-0.5"></div>
|
| 276 |
+
<div class="w-0.5 h-0.5 bg-red-400"></div>
|
| 277 |
+
<div class="w-0.5 h-0.5"></div>
|
| 278 |
+
<div class="w-0.5 h-0.5"></div>
|
| 279 |
+
<div class="w-0.5 h-0.5"></div>
|
| 280 |
+
<div class="w-0.5 h-0.5 bg-red-400"></div>
|
| 281 |
+
<div class="w-0.5 h-0.5"></div>
|
| 282 |
+
<div class="w-0.5 h-0.5 bg-red-400"></div>
|
| 283 |
+
</div>
|
| 284 |
+
<p class="text-[10px] font-bold text-red-300 mb-1">Artificial</p>
|
| 285 |
+
<p class="text-[10px] text-brand-gray/60 leading-tight">Grid pattern / bright dots.
|
| 286 |
+
</p>
|
| 287 |
+
</div>
|
| 288 |
+
</div>
|
| 289 |
+
</div>
|
| 290 |
+
</div>
|
| 291 |
+
|
| 292 |
+
<!-- TAB 2: Noise Pattern -->
|
| 293 |
+
<div id="panel-pattern" class="hidden">
|
| 294 |
+
<div class="flex items-center justify-between mb-4">
|
| 295 |
+
<h3 class="text-white text-xs font-semibold">High-Frequency Noise Filter</h3>
|
| 296 |
+
<span
|
| 297 |
+
class="text-[10px] text-brand-gray bg-white/5 px-2 py-1 rounded border border-white/5">Surface
|
| 298 |
+
Artifacts</span>
|
| 299 |
+
</div>
|
| 300 |
+
|
| 301 |
+
<div class="grid grid-cols-1 gap-6">
|
| 302 |
+
<div
|
| 303 |
+
class="relative group flex justify-center bg-black/50 rounded-lg p-4 border border-white/5">
|
| 304 |
+
<img id="pattern-map-img" src="" alt="Noise Pattern"
|
| 305 |
+
class="w-64 h-64 object-contain rounded-lg shadow-2xl filter contrast-125 brightness-110">
|
| 306 |
+
</div>
|
| 307 |
+
<!-- Pattern Legend -->
|
| 308 |
+
<div class="p-3 rounded-lg bg-white/5 border border-white/5">
|
| 309 |
+
<h4 class="text-[10px] text-brand-gray uppercase font-bold mb-2">What to look for</h4>
|
| 310 |
+
<ul class="space-y-2">
|
| 311 |
+
<li class="flex items-start gap-2 text-[10px] text-gray-300">
|
| 312 |
+
<i class="fa-solid fa-check text-brand-green mt-0.5"></i>
|
| 313 |
+
<span><b>Real:</b> Random "TV static" or organic grain. No straight
|
| 314 |
+
lines.</span>
|
| 315 |
+
</li>
|
| 316 |
+
<li class="flex items-start gap-2 text-[10px] text-gray-300">
|
| 317 |
+
<i class="fa-solid fa-xmark text-red-500 mt-0.5"></i>
|
| 318 |
+
<span><b>AI:</b> Visible "checkerboard" grid lines or square tiles.</span>
|
| 319 |
+
</li>
|
| 320 |
+
</ul>
|
| 321 |
+
</div>
|
| 322 |
+
</div>
|
| 323 |
+
</div>
|
| 324 |
+
|
| 325 |
+
</div>
|
| 326 |
+
</div>
|
| 327 |
+
|
| 328 |
+
<!-- Decorative line -->
|
| 329 |
+
<div
|
| 330 |
+
class="absolute bottom-0 left-0 w-full h-[2px] bg-gradient-to-r from-transparent via-brand-green/50 to-transparent">
|
| 331 |
+
</div>
|
| 332 |
+
</div>
|
| 333 |
+
</div>
|
| 334 |
+
|
| 335 |
+
</main>
|
| 336 |
+
|
| 337 |
+
<script src="{{ url_for('static', filename='bg-animation.js') }}"></script>
|
| 338 |
+
<script src="{{ url_for('static', filename='script.js') }}?v=5"></script>
|
| 339 |
+
<style>
|
| 340 |
+
.animate-fade-in-up {
|
| 341 |
+
animation: fadeInUp 0.8s cubic-bezier(0.16, 1, 0.3, 1) forwards;
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
@keyframes fadeInUp {
|
| 345 |
+
from {
|
| 346 |
+
opacity: 0;
|
| 347 |
+
transform: translateY(20px);
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
to {
|
| 351 |
+
opacity: 1;
|
| 352 |
+
transform: translateY(0);
|
| 353 |
+
}
|
| 354 |
+
}
|
| 355 |
+
</style>
|
| 356 |
+
</body>
|
| 357 |
+
|
| 358 |
+
</html>
|
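The "Frequency Map" panel above displays a 2D FFT log-magnitude spectrum: per the legend, real photos typically show a single central starburst, while generated or upscaled images often add periodic bright dots or a grid. The NumPy sketch below illustrates how such a spectrum can be produced; it is not the repository's frequency_analysis.py, just a minimal example of the visualization the legend describes.

# Illustrative sketch of an FFT log-magnitude spectrum like the one shown in
# the "Frequency Map" tab; frequency_analysis.py may compute it differently.
import numpy as np
from PIL import Image

def fft_spectrum(path: str) -> np.ndarray:
    gray = np.array(Image.open(path).convert("L"), dtype=np.float32)
    shifted = np.fft.fftshift(np.fft.fft2(gray))   # move the DC term to the center
    magnitude = np.log1p(np.abs(shifted))          # compress the dynamic range
    return (255 * magnitude / magnitude.max()).astype(np.uint8)

# Real photos tend to produce a single central "starburst"; AI-generated or
# upsampled images often add periodic bright dots or a faint grid off-center.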
test_app_load.py
ADDED
|
@@ -0,0 +1,8 @@
|
| 1 |
+
try:
|
| 2 |
+
print("Importing app...")
|
| 3 |
+
import app
|
| 4 |
+
print("Import success!")
|
| 5 |
+
except Exception as e:
|
| 6 |
+
print(f"Import failed: {e}")
|
| 7 |
+
except SystemExit as se:
|
| 8 |
+
print(f"SystemExit: {se}")
|
test_freq.py
ADDED
|
@@ -0,0 +1,17 @@
|
| 1 |
+
try:
|
| 2 |
+
from frequency_analysis import FrequencyAnalyzer
|
| 3 |
+
print("Import Successful")
|
| 4 |
+
|
| 5 |
+
fa = FrequencyAnalyzer()
|
| 6 |
+
print("Initialization Successful")
|
| 7 |
+
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
# Create dummy image
|
| 12 |
+
img = Image.new('RGB', (100, 100), color = 'red')
|
| 13 |
+
res = fa.generate_spectrum(img)
|
| 14 |
+
print(f"Generation Successful. Len: {len(res)}")
|
| 15 |
+
|
| 16 |
+
except Exception as e:
|
| 17 |
+
print(f"Error: {e}")
|
test_image.jpg
ADDED
|
Git LFS Details
|
train.py
ADDED
|
@@ -0,0 +1,151 @@
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from datasets import load_dataset, ClassLabel, Image
|
| 4 |
+
from transformers import (
|
| 5 |
+
ViTImageProcessor,
|
| 6 |
+
ViTForImageClassification,
|
| 7 |
+
TrainingArguments,
|
| 8 |
+
Trainer,
|
| 9 |
+
DefaultDataCollator,
|
| 10 |
+
)
|
| 11 |
+
import evaluate
|
| 12 |
+
from torchvision.transforms import (
|
| 13 |
+
CenterCrop,
|
| 14 |
+
Compose,
|
| 15 |
+
Normalize,
|
| 16 |
+
RandomRotation,
|
| 17 |
+
RandomResizedCrop,
|
| 18 |
+
RandomHorizontalFlip,
|
| 19 |
+
RandomAdjustSharpness,
|
| 20 |
+
Resize,
|
| 21 |
+
ToTensor,
|
| 22 |
+
)
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
# --- Configuration ---
|
| 26 |
+
MODEL_NAME = "google/vit-base-patch16-224"
|
| 27 |
+
DATASET_DIR = "./dataset"
|
| 28 |
+
OUTPUT_DIR = "./model"
|
| 29 |
+
BATCH_SIZE = 16
|
| 30 |
+
NUM_EPOCHS = 3
|
| 31 |
+
LEARNING_RATE = 2e-5
|
| 32 |
+
|
| 33 |
+
def main():
|
| 34 |
+
# 1. Load Dataset
|
| 35 |
+
print("Loading dataset...")
|
| 36 |
+
# Expects dataset structure: dataset/train/LABEL and dataset/test/LABEL
|
| 37 |
+
data_files = {}
|
| 38 |
+
if os.path.exists(os.path.join(DATASET_DIR, "train")):
|
| 39 |
+
data_files["train"] = os.path.join(DATASET_DIR, "train")
|
| 40 |
+
if os.path.exists(os.path.join(DATASET_DIR, "test")):
|
| 41 |
+
data_files["test"] = os.path.join(DATASET_DIR, "test")
|
| 42 |
+
|
| 43 |
+
if not data_files:
|
| 44 |
+
print(f"Error: No data found in {DATASET_DIR}. Please organize data in 'train' and 'test' folders.")
|
| 45 |
+
print("Expected structure: ./dataset/train/REAL, ./dataset/train/FAKE, etc.")
|
| 46 |
+
return
|
| 47 |
+
|
| 48 |
+
# Load with the Hugging Face "imagefolder" builder, which infers the
|
| 49 |
+
# class labels automatically from the sub-directory names.
|
| 50 |
+
dataset = load_dataset("imagefolder", data_dir=DATASET_DIR)
|
| 51 |
+
|
| 52 |
+
# 2. Labels
|
| 53 |
+
labels = dataset["train"].features["label"].names
|
| 54 |
+
id2label = {str(i): c for i, c in enumerate(labels)}
|
| 55 |
+
label2id = {c: str(i) for i, c in enumerate(labels)}
|
| 56 |
+
print(f"Labels found: {labels}")
|
| 57 |
+
|
| 58 |
+
# 3. Preprocessing
|
| 59 |
+
processor = ViTImageProcessor.from_pretrained(MODEL_NAME)
|
| 60 |
+
image_mean = processor.image_mean
|
| 61 |
+
image_std = processor.image_std
|
| 62 |
+
size = processor.size["height"]
|
| 63 |
+
|
| 64 |
+
normalize = Normalize(mean=image_mean, std=image_std)
|
| 65 |
+
|
| 66 |
+
_train_transforms = Compose([
|
| 67 |
+
RandomResizedCrop(size),
|
| 68 |
+
RandomHorizontalFlip(),
|
| 69 |
+
RandomAdjustSharpness(2),
|
| 70 |
+
ToTensor(),
|
| 71 |
+
normalize,
|
| 72 |
+
])
|
| 73 |
+
|
| 74 |
+
_val_transforms = Compose([
|
| 75 |
+
Resize(size),
|
| 76 |
+
CenterCrop(size),
|
| 77 |
+
ToTensor(),
|
| 78 |
+
normalize,
|
| 79 |
+
])
|
| 80 |
+
|
| 81 |
+
def train_transforms(examples):
|
| 82 |
+
examples["pixel_values"] = [_train_transforms(image.convert("RGB")) for image in examples["image"]]
|
| 83 |
+
return examples
|
| 84 |
+
|
| 85 |
+
def val_transforms(examples):
|
| 86 |
+
examples["pixel_values"] = [_val_transforms(image.convert("RGB")) for image in examples["image"]]
|
| 87 |
+
return examples
|
| 88 |
+
|
| 89 |
+
# Apply transforms
|
| 90 |
+
print("Applying transforms...")
|
| 91 |
+
dataset["train"].set_transform(train_transforms)
|
| 92 |
+
if "test" in dataset:
|
| 93 |
+
dataset["test"].set_transform(val_transforms)
|
| 94 |
+
|
| 95 |
+
# 4. Model
|
| 96 |
+
print(f"Loading model {MODEL_NAME}...")
|
| 97 |
+
model = ViTForImageClassification.from_pretrained(
|
| 98 |
+
MODEL_NAME,
|
| 99 |
+
num_labels=len(labels),
|
| 100 |
+
id2label=id2label,
|
| 101 |
+
label2id=label2id,
|
| 102 |
+
ignore_mismatched_sizes=True
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
# 5. Metrics
|
| 106 |
+
metric = evaluate.load("accuracy")
|
| 107 |
+
def compute_metrics(eval_pred):
|
| 108 |
+
predictions = np.argmax(eval_pred.predictions, axis=1)
|
| 109 |
+
return metric.compute(predictions=predictions, references=eval_pred.label_ids)
|
| 110 |
+
|
| 111 |
+
# 6. Training Arguments
|
| 112 |
+
args = TrainingArguments(
|
| 113 |
+
output_dir=OUTPUT_DIR,
|
| 114 |
+
remove_unused_columns=False,
|
| 115 |
+
evaluation_strategy="epoch",
|
| 116 |
+
save_strategy="epoch",
|
| 117 |
+
learning_rate=LEARNING_RATE,
|
| 118 |
+
per_device_train_batch_size=BATCH_SIZE,
|
| 119 |
+
per_device_eval_batch_size=BATCH_SIZE,
|
| 120 |
+
num_train_epochs=NUM_EPOCHS,
|
| 121 |
+
warmup_ratio=0.1,
|
| 122 |
+
logging_steps=10,
|
| 123 |
+
load_best_model_at_end=True,
|
| 124 |
+
metric_for_best_model="accuracy",
|
| 125 |
+
push_to_hub=False,
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
collator = DefaultDataCollator()
|
| 129 |
+
|
| 130 |
+
trainer = Trainer(
|
| 131 |
+
model=model,
|
| 132 |
+
args=args,
|
| 133 |
+
train_dataset=dataset["train"],
|
| 134 |
+
eval_dataset=dataset["test"] if "test" in dataset else None,
|
| 135 |
+
tokenizer=processor,
|
| 136 |
+
data_collator=collator,
|
| 137 |
+
compute_metrics=compute_metrics,
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
# 7. Train
|
| 141 |
+
print("Starting training...")
|
| 142 |
+
trainer.train()
|
| 143 |
+
|
| 144 |
+
# 8. Save
|
| 145 |
+
print(f"Saving model to {OUTPUT_DIR}...")
|
| 146 |
+
trainer.save_model(OUTPUT_DIR)
|
| 147 |
+
processor.save_pretrained(OUTPUT_DIR)
|
| 148 |
+
print("Done!")
|
| 149 |
+
|
| 150 |
+
if __name__ == "__main__":
|
| 151 |
+
main()
|
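After train.py finishes, the fine-tuned checkpoint and processor live in ./model. The minimal inference sketch below can serve as a sanity check; it assumes the default OUTPUT_DIR above and the bundled test_image.jpg, and is separate from app.py's prediction path.

# Minimal sketch: load the checkpoint saved by train.py and classify one image.
# Assumes OUTPUT_DIR="./model" and the bundled test_image.jpg; not app.py's code path.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("./model")
model = ViTForImageClassification.from_pretrained("./model")
model.eval()

image = Image.open("test_image.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(-1).item()
print(model.config.id2label[pred], float(logits.softmax(-1).max()))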