Spaces:
Sleeping
Sleeping
Initial commit
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .dockerignore +28 -0
- .github/instructions/copilot-instructions.md +61 -0
- .gitignore +70 -0
- CLAUDE.md +1311 -0
- Dockerfile +18 -0
- README.md +14 -7
- configs/coherence_baseline.yaml +44 -0
- configs/fingerprint_baseline.yaml +46 -0
- configs/sstgnn_baseline.yaml +49 -0
- frontend/Dockerfile +25 -0
- frontend/components/ui/background-paths.tsx +50 -0
- frontend/components/ui/confidence-ring.tsx +67 -0
- frontend/components/ui/engine-breakdown.tsx +66 -0
- frontend/components/ui/liquid-glass-button.tsx +31 -0
- frontend/components/ui/loading-orbit.tsx +67 -0
- frontend/components/ui/media-meta-strip.tsx +23 -0
- frontend/components/ui/radial-orbital-timeline.tsx +73 -0
- frontend/components/ui/upload-zone.tsx +96 -0
- frontend/components/ui/verdict-badge.tsx +23 -0
- frontend/lib/api.ts +33 -0
- frontend/lib/constants.ts +46 -0
- frontend/lib/utils.ts +29 -0
- frontend/next-env.d.ts +6 -0
- frontend/next.config.js +17 -0
- frontend/package-lock.json +0 -0
- frontend/package.json +34 -0
- frontend/pages/_app.tsx +18 -0
- frontend/pages/_document.tsx +20 -0
- frontend/pages/analyze.tsx +249 -0
- frontend/pages/index.tsx +206 -0
- frontend/postcss.config.js +6 -0
- frontend/styles/globals.css +83 -0
- frontend/tailwind.config.ts +51 -0
- frontend/tsconfig.json +25 -0
- frontend/types/detection.ts +43 -0
- frontend/types/framer-motion.d.ts +1 -0
- package-lock.json +1268 -0
- package.json +5 -0
- pytest.ini +4 -0
- requirements.txt +42 -0
- runpod_handler.py +100 -0
- scripts/__init__.py +0 -0
- scripts/assemble_image_baseline.py +37 -0
- scripts/bootstrap_kaggle_dataset.py +72 -0
- scripts/demo_smoke_test.py +298 -0
- scripts/kaggle_train_image_baseline.py +235 -0
- scripts/prepare_cifake.py +76 -0
- scripts/prepare_cocoxgen.py +76 -0
- scripts/prepare_hf_source.py +79 -0
- scripts/prepare_ifakefakedb.py +66 -0
.dockerignore
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.gitignore
|
| 3 |
+
.venv
|
| 4 |
+
venv
|
| 5 |
+
env
|
| 6 |
+
__pycache__
|
| 7 |
+
*.pyc
|
| 8 |
+
*.pyo
|
| 9 |
+
.pytest_cache
|
| 10 |
+
.mypy_cache
|
| 11 |
+
.ruff_cache
|
| 12 |
+
.tmp
|
| 13 |
+
tmp*
|
| 14 |
+
pytest_tmp
|
| 15 |
+
node_modules
|
| 16 |
+
frontend/node_modules
|
| 17 |
+
frontend/.next
|
| 18 |
+
frontend/out
|
| 19 |
+
.vscode
|
| 20 |
+
.idea
|
| 21 |
+
data
|
| 22 |
+
models
|
| 23 |
+
training
|
| 24 |
+
tests
|
| 25 |
+
*.log
|
| 26 |
+
.env
|
| 27 |
+
.env.local
|
| 28 |
+
.env.*.local
|
.github/instructions/copilot-instructions.md
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GitHub Copilot Instructions — GenAI-DeepDetect
|
| 2 |
+
|
| 3 |
+
Read CLAUDE.md first. This file adds IDE-specific context.
|
| 4 |
+
|
| 5 |
+
## Critical rules (never violate)
|
| 6 |
+
|
| 7 |
+
- `src/types.py` is frozen. Never modify `EngineResult`, `DetectionResponse`, or `GeneratorLabel`.
|
| 8 |
+
- `NUM_GENERATOR_CLASSES = 8` everywhere. If you see 5 anywhere, fix it to 8.
|
| 9 |
+
- SSTGNN checkpoints load with `model.load_state_dict()` only. Never `torch.jit.load`.
|
| 10 |
+
- Do not use `torch.jit.script` or `torch.jit.trace` on any PyG model.
|
| 11 |
+
- Frontend: pages router only. No `app/` directory. No `src/` wrapper inside `frontend/`.
|
| 12 |
+
- All Kaggle training cells call `scripts/train_image_baseline.py`. Not `training/phase1.../train.py` directly.
|
| 13 |
+
- `min_detection_confidence=0.3` in all MediaPipe calls. Not 0.5.
|
| 14 |
+
|
| 15 |
+
## Generator label index (memorise this)
|
| 16 |
+
|
| 17 |
+
```
|
| 18 |
+
0 = real 4 = dall_e
|
| 19 |
+
1 = unknown_gan 5 = flux
|
| 20 |
+
2 = stable_diffusion 6 = firefly
|
| 21 |
+
3 = midjourney 7 = imagen
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
## Path conventions
|
| 25 |
+
|
| 26 |
+
```
|
| 27 |
+
src/ library code — no sys.exit(), logging only via logging module
|
| 28 |
+
scripts/ Kaggle entrypoints — one file per task
|
| 29 |
+
training/phase*/ per-phase training scripts
|
| 30 |
+
tests/ mirrors src/ structure
|
| 31 |
+
configs/ YAML configs per engine
|
| 32 |
+
```
|
| 33 |
+
|
| 34 |
+
## Import order for training scripts
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
import sys
|
| 38 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent)) # repo root
|
| 39 |
+
from src.types import ...
|
| 40 |
+
from training.phaseN_*/model import ...
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## Checkpoint format (all engines)
|
| 44 |
+
|
| 45 |
+
```python
|
| 46 |
+
torch.save({
|
| 47 |
+
"epoch": int,
|
| 48 |
+
"model_state": model.state_dict(),
|
| 49 |
+
"optimizer": optimizer.state_dict(),
|
| 50 |
+
"val_auc": float,
|
| 51 |
+
"config": dict,
|
| 52 |
+
}, path)
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
## Never do these
|
| 56 |
+
|
| 57 |
+
- `from training.phase3_sstgnn.model import SSTGNNModel` inside `src/` — use try/except ImportError
|
| 58 |
+
- `torch.jit.script(sstgnn_model)` — crashes silently on PyG
|
| 59 |
+
- `min_detection_confidence=0.5` — fails on 40% of DFDC frames
|
| 60 |
+
- Hardcode `localhost:8000` in frontend — use `process.env.NEXT_PUBLIC_API_URL`
|
| 61 |
+
- Import `torch_geometric` at module level without a guard — not available in all envs
|
.gitignore
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GenAI-DeepDetect .gitignore
|
| 2 |
+
|
| 3 |
+
# ── Model files ───────────────────────────────────────────────────────────────
|
| 4 |
+
models/
|
| 5 |
+
*.pt
|
| 6 |
+
*.pth
|
| 7 |
+
*.bin
|
| 8 |
+
*.safetensors
|
| 9 |
+
|
| 10 |
+
# ── Datasets ──────────────────────────────────────────────────────────────────
|
| 11 |
+
data/
|
| 12 |
+
*.zip
|
| 13 |
+
*.tar
|
| 14 |
+
*.tar.gz
|
| 15 |
+
|
| 16 |
+
# ── Cache dirs (never commit these) ──────────────────────────────────────────
|
| 17 |
+
.deps-local/
|
| 18 |
+
.pydeps/
|
| 19 |
+
.hf-cache/
|
| 20 |
+
.huggingface/
|
| 21 |
+
__pycache__/
|
| 22 |
+
*.pyc
|
| 23 |
+
*.pyo
|
| 24 |
+
.pytest_cache/
|
| 25 |
+
.mypy_cache/
|
| 26 |
+
.ruff_cache/
|
| 27 |
+
|
| 28 |
+
# ── Kaggle artifacts ──────────────────────────────────────────────────────────
|
| 29 |
+
.kaggle-kernel-output/
|
| 30 |
+
.kaggle-kernel-pulled/
|
| 31 |
+
kaggle.json
|
| 32 |
+
# Keep .kaggle-kernel/ (used for packaging)
|
| 33 |
+
|
| 34 |
+
# ── Training logs ─────────────────────────────────────────────────────────────
|
| 35 |
+
training/logs/
|
| 36 |
+
*.log
|
| 37 |
+
|
| 38 |
+
# ── Environment ───────────────────────────────────────────────────────────────
|
| 39 |
+
.env
|
| 40 |
+
.env.local
|
| 41 |
+
.env.*.local
|
| 42 |
+
venv/
|
| 43 |
+
.venv/
|
| 44 |
+
env/
|
| 45 |
+
.env.example
|
| 46 |
+
|
| 47 |
+
# ── IDE ───────────────────────────────────────────────────────────────────────
|
| 48 |
+
.vscode/
|
| 49 |
+
.idea/
|
| 50 |
+
*.swp
|
| 51 |
+
*.swo
|
| 52 |
+
.DS_Store
|
| 53 |
+
Thumbs.db
|
| 54 |
+
|
| 55 |
+
# ── Frontend ──────────────────────────────────────────────────────────────────
|
| 56 |
+
node_modules/
|
| 57 |
+
frontend/node_modules/
|
| 58 |
+
frontend/.next/
|
| 59 |
+
frontend/out/
|
| 60 |
+
frontend/.env.local
|
| 61 |
+
frontend/tsconfig.tsbuildinfo
|
| 62 |
+
|
| 63 |
+
# ── Junk that got committed before ────────────────────────────────────────────
|
| 64 |
+
"New Text Document.txt"
|
| 65 |
+
*.txt.bak
|
| 66 |
+
|
| 67 |
+
# ── Build artifacts ───────────────────────────────────────────────────────────
|
| 68 |
+
dist/
|
| 69 |
+
build/
|
| 70 |
+
*.egg-info/
|
CLAUDE.md
ADDED
|
@@ -0,0 +1,1311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CLAUDE.md — GenAI-DeepDetect Agent Instructions
|
| 2 |
+
|
| 3 |
+
> Read this file before touching any code. It is the single source of truth for
|
| 4 |
+
> how this repo is structured, what conventions to follow, and what the hard
|
| 5 |
+
> constraints are.
|
| 6 |
+
|
| 7 |
+
# CLAUDE.md — GenAI-DeepDetect
|
| 8 |
+
|
| 9 |
+
Full implementation guide for AI-assisted development on this project. Read this
|
| 10 |
+
file before touching any code.
|
| 11 |
+
|
| 12 |
+
-# CLAUDE.md — GenAI-DeepDetect
|
| 13 |
+
|
| 14 |
+
Complete implementation guide. Read this before writing any code. All models are
|
| 15 |
+
**100% pre-trained** — no training required, no GPU needed locally.
|
| 16 |
+
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
## MCP Tools — Always Use These First
|
| 20 |
+
|
| 21 |
+
Before writing any code or looking up any API, resolve docs through MCP:
|
| 22 |
+
|
| 23 |
+
```
|
| 24 |
+
context7: resolve-library-id + query-docs
|
| 25 |
+
→ use for: transformers, torch, mediapipe, fastapi, torch-geometric,
|
| 26 |
+
google-generativeai, facenet-pytorch, opencv, next.js, runpod
|
| 27 |
+
|
| 28 |
+
huggingface: model_search + model_details + hf_doc_search
|
| 29 |
+
→ use for: finding model cards, checking input formats, confirming
|
| 30 |
+
pipeline task names, verifying checkpoint sizes before using
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
**Rule**: Never guess an API signature. Always call `context7.query-docs` first.
|
| 34 |
+
Never use a HF model without calling `huggingface.model_details` to confirm it
|
| 35 |
+
exists, check its license, and verify its input format.
|
| 36 |
+
|
| 37 |
+
---
|
| 38 |
+
|
| 39 |
+
## Project Skill And Memory Policy
|
| 40 |
+
|
| 41 |
+
For work in this repository, always prefer the installed Claude Code skill pack
|
| 42 |
+
when a relevant skill applies instead of ad hoc workflows.
|
| 43 |
+
|
| 44 |
+
- Use `context7-mcp` for any library, framework, SDK, or API question, and
|
| 45 |
+
before changing code that depends on external packages or hosted services.
|
| 46 |
+
- Use `mem-search` / claude-mem whenever the user asks about previous sessions,
|
| 47 |
+
prior fixes, earlier decisions, or "how we solved this before".
|
| 48 |
+
- When using claude-mem, scope searches to project name `genai-deepdetect`
|
| 49 |
+
unless the user explicitly asks for a broader search.
|
| 50 |
+
- Keep following the repo-specific MCP rules below even when a general-purpose
|
| 51 |
+
skill also applies.
|
| 52 |
+
|
| 53 |
+
Recommended companion skills for this project:
|
| 54 |
+
|
| 55 |
+
- `systematic-debugging` for bugs, failing tests, or unexpected runtime
|
| 56 |
+
behavior
|
| 57 |
+
- `verification-before-completion` before claiming a fix is done
|
| 58 |
+
- `security-review` for secrets, external APIs, uploads, and auth-sensitive
|
| 59 |
+
changes
|
| 60 |
+
|
| 61 |
+
---
|
| 62 |
+
|
| 63 |
+
## Project Goal
|
| 64 |
+
|
| 65 |
+
Multimodal deepfake and AI-generated content detector.
|
| 66 |
+
|
| 67 |
+
- Input: image (JPEG/PNG/WEBP) or video (MP4/MOV/AVI, max 100MB)
|
| 68 |
+
- Output: `DetectionResponse` — verdict, confidence, generator attribution,
|
| 69 |
+
natural-language explanation, per-engine breakdown
|
| 70 |
+
|
| 71 |
+
All inference runs on pre-trained HuggingFace checkpoints. No training scripts
|
| 72 |
+
need to run for the system to work.
|
| 73 |
+
|
| 74 |
+
---
|
| 75 |
+
|
| 76 |
+
## Architecture
|
| 77 |
+
|
| 78 |
+
```
|
| 79 |
+
Request (image/video)
|
| 80 |
+
│
|
| 81 |
+
▼
|
| 82 |
+
FastAPI src/api/main.py
|
| 83 |
+
│
|
| 84 |
+
├── FingerprintEngine (image artifacts, generator attribution)
|
| 85 |
+
├── CoherenceEngine (lip-sync, biological coherence)
|
| 86 |
+
└── SSTGNNEngine (landmark spatio-temporal graph)
|
| 87 |
+
│
|
| 88 |
+
▼
|
| 89 |
+
Fuser src/fusion/fuser.py
|
| 90 |
+
│
|
| 91 |
+
▼
|
| 92 |
+
Explainer src/explainability/explainer.py ← Gemini API
|
| 93 |
+
│
|
| 94 |
+
▼
|
| 95 |
+
DetectionResponse src/types.py
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
---
|
| 99 |
+
|
| 100 |
+
## All Pre-Trained Models
|
| 101 |
+
|
| 102 |
+
Every model downloads via `transformers.pipeline()` or `from_pretrained()`. Zero
|
| 103 |
+
training. Zero fine-tuning.
|
| 104 |
+
|
| 105 |
+
| Engine | Model | HF ID | Size | Task |
|
| 106 |
+
| ----------- | ------------------- | ------------------------------------------ | ------ | ---------------------- |
|
| 107 |
+
| Fingerprint | SDXL Detector | `Organika/sdxl-detector` | ~330MB | binary fake/real |
|
| 108 |
+
| Fingerprint | CLIP ViT-L/14 | `openai/clip-vit-large-patch14` | ~3.5GB | generator attribution |
|
| 109 |
+
| Fingerprint | AI Image Detector | `haywoodsloan/ai-image-detector-deploy` | ~90MB | ensemble backup |
|
| 110 |
+
| SSTGNN | DeepFake Detector | `dima806/deepfake_vs_real_image_detection` | ~100MB | ResNet50 per-frame |
|
| 111 |
+
| SSTGNN | Deep Fake Detector | `prithivMLmods/Deep-Fake-Detector-Model` | ~80MB | EfficientNet-B4 backup |
|
| 112 |
+
| Coherence | MediaPipe Face Mesh | bundled in `mediapipe` package | ~10MB | landmark extraction |
|
| 113 |
+
| Coherence | FaceNet VGGFace2 | `facenet-pytorch` (auto-downloads) | ~100MB | temporal embeddings |
|
| 114 |
+
| Coherence | SyncNet | `Junhua-Zhu/SyncNet` | ~50MB | lip-sync offset |
|
| 115 |
+
|
| 116 |
+
CLIP is the largest at 3.5GB — preload at startup, never reload. Everything else
|
| 117 |
+
fits in HF Spaces 16GB RAM free tier.
|
| 118 |
+
|
| 119 |
+
---
|
| 120 |
+
|
| 121 |
+
## Environment Variables
|
| 122 |
+
|
| 123 |
+
```bash
|
| 124 |
+
# Required
|
| 125 |
+
GEMINI_API_KEY=... # Google AI Studio — free tier works
|
| 126 |
+
HF_TOKEN=hf_... # HuggingFace read token (free)
|
| 127 |
+
|
| 128 |
+
# Hosting
|
| 129 |
+
RUNPOD_API_KEY=... # RunPod serverless (heavy video)
|
| 130 |
+
RUNPOD_ENDPOINT_ID=... # your deployed endpoint ID
|
| 131 |
+
|
| 132 |
+
# Paths
|
| 133 |
+
MODEL_CACHE_DIR=/data/models # HF Spaces: /data/models (persists)
|
| 134 |
+
# local dev: /tmp/models
|
| 135 |
+
|
| 136 |
+
# Optional
|
| 137 |
+
MAX_VIDEO_FRAMES=300
|
| 138 |
+
MAX_VIDEO_SIZE_MB=100
|
| 139 |
+
INFERENCE_BACKEND=local # "local" | "runpod"
|
| 140 |
+
TOKENIZERS_PARALLELISM=false
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
Set all secrets in:
|
| 144 |
+
|
| 145 |
+
- HF Spaces → Settings → Repository secrets
|
| 146 |
+
- RunPod → Secrets tab
|
| 147 |
+
- Vercel → Environment Variables
|
| 148 |
+
|
| 149 |
+
---
|
| 150 |
+
|
| 151 |
+
## Gemini API — Explainability Engine
|
| 152 |
+
|
| 153 |
+
**Primary model**: `gemini-2.5-pro-preview-03-25` **Fallback model**:
|
| 154 |
+
`gemini-1.5-pro-002`
|
| 155 |
+
|
| 156 |
+
Both available on Google AI Studio free tier (15 req/min, 1M tokens/day). Always
|
| 157 |
+
query `context7.query-docs google-generativeai GenerativeModel` before modifying
|
| 158 |
+
this file.
|
| 159 |
+
|
| 160 |
+
### `src/explainability/explainer.py`
|
| 161 |
+
|
| 162 |
+
```python
|
| 163 |
+
import os
|
| 164 |
+
import logging
|
| 165 |
+
import google.generativeai as genai
|
| 166 |
+
from src.types import EngineResult
|
| 167 |
+
|
| 168 |
+
logger = logging.getLogger(__name__)
|
| 169 |
+
|
| 170 |
+
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
|
| 171 |
+
|
| 172 |
+
SYSTEM_INSTRUCTION = (
|
| 173 |
+
"You are a deepfake forensics analyst writing reports for security professionals. "
|
| 174 |
+
"Given detection engine outputs, write exactly 2-3 sentences in plain English "
|
| 175 |
+
"explaining why the content is real or fake. "
|
| 176 |
+
"Be specific — name the strongest signals. "
|
| 177 |
+
"Use direct declarative sentences. No hedging. No 'I think'. "
|
| 178 |
+
"Output only the explanation text, nothing else."
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
_model = None
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _get_model() -> genai.GenerativeModel:
|
| 185 |
+
global _model
|
| 186 |
+
if _model is None:
|
| 187 |
+
for name in ("gemini-2.5-pro-preview-03-25", "gemini-1.5-pro-002"):
|
| 188 |
+
try:
|
| 189 |
+
_model = genai.GenerativeModel(
|
| 190 |
+
model_name=name,
|
| 191 |
+
system_instruction=SYSTEM_INSTRUCTION,
|
| 192 |
+
)
|
| 193 |
+
logger.info(f"Gemini model loaded: {name}")
|
| 194 |
+
break
|
| 195 |
+
except Exception as e:
|
| 196 |
+
logger.warning(f"Gemini {name} unavailable: {e}")
|
| 197 |
+
return _model
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def explain(
|
| 201 |
+
verdict: str,
|
| 202 |
+
confidence: float,
|
| 203 |
+
engine_results: list[EngineResult],
|
| 204 |
+
generator: str,
|
| 205 |
+
) -> str:
|
| 206 |
+
breakdown = "\n".join(
|
| 207 |
+
f"- {r.engine}: {r.verdict} ({r.confidence:.0%}) — {r.explanation}"
|
| 208 |
+
for r in engine_results
|
| 209 |
+
)
|
| 210 |
+
prompt = (
|
| 211 |
+
f"Verdict: {verdict} ({confidence:.0%} confidence)\n"
|
| 212 |
+
f"Attributed generator: {generator}\n"
|
| 213 |
+
f"Engine breakdown:\n{breakdown}\n\n"
|
| 214 |
+
"Write the forensics explanation."
|
| 215 |
+
)
|
| 216 |
+
try:
|
| 217 |
+
model = _get_model()
|
| 218 |
+
if model is None:
|
| 219 |
+
raise RuntimeError("No Gemini model available")
|
| 220 |
+
response = model.generate_content(prompt)
|
| 221 |
+
return response.text.strip()
|
| 222 |
+
except Exception as e:
|
| 223 |
+
logger.error(f"Gemini explain failed: {e}")
|
| 224 |
+
top = engine_results[0] if engine_results else None
|
| 225 |
+
return (
|
| 226 |
+
f"Content classified as {verdict} with {confidence:.0%} confidence. "
|
| 227 |
+
f"{'Primary signal from ' + top.engine + ' engine.' if top else ''}"
|
| 228 |
+
)
|
| 229 |
+
```
|
| 230 |
+
|
| 231 |
+
---
|
| 232 |
+
|
| 233 |
+
## Engine Implementations
|
| 234 |
+
|
| 235 |
+
### FingerprintEngine — `src/engines/fingerprint/engine.py`
|
| 236 |
+
|
| 237 |
+
Query context7 for `transformers pipeline image-classification` and
|
| 238 |
+
`huggingface model_details Organika/sdxl-detector` before modifying.
|
| 239 |
+
|
| 240 |
+
```python
|
| 241 |
+
import os, logging, threading
|
| 242 |
+
import numpy as np
|
| 243 |
+
from PIL import Image
|
| 244 |
+
from transformers import pipeline, CLIPModel, CLIPProcessor
|
| 245 |
+
import torch
|
| 246 |
+
from src.types import EngineResult
|
| 247 |
+
|
| 248 |
+
logger = logging.getLogger(__name__)
|
| 249 |
+
CACHE = os.environ.get("MODEL_CACHE_DIR", "/tmp/models")
|
| 250 |
+
|
| 251 |
+
GENERATOR_PROMPTS = {
|
| 252 |
+
"real": "a real photograph taken by a camera with natural lighting",
|
| 253 |
+
"unknown_gan": "a GAN-generated image with checkerboard artifacts and blurry edges",
|
| 254 |
+
"stable_diffusion": "a Stable Diffusion image with painterly soft textures",
|
| 255 |
+
"midjourney": "a Midjourney image with cinematic dramatic lighting and hyperdetail",
|
| 256 |
+
"dall_e": "a DALL-E image with clean illustration-style and smooth gradients",
|
| 257 |
+
"flux": "a FLUX model image with photorealistic precision and sharp detail",
|
| 258 |
+
"firefly": "an Adobe Firefly image with commercial stock-photo aesthetics",
|
| 259 |
+
"imagen": "a Google Imagen image with precise photorealistic rendering",
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
_lock = threading.Lock()
|
| 263 |
+
_detector = _clip_model = _clip_processor = _backup = None
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _load():
|
| 267 |
+
global _detector, _clip_model, _clip_processor, _backup
|
| 268 |
+
if _detector is not None:
|
| 269 |
+
return
|
| 270 |
+
logger.info("Loading fingerprint models...")
|
| 271 |
+
_detector = pipeline("image-classification",
|
| 272 |
+
model="Organika/sdxl-detector", cache_dir=CACHE)
|
| 273 |
+
_clip_model = CLIPModel.from_pretrained(
|
| 274 |
+
"openai/clip-vit-large-patch14", cache_dir=CACHE)
|
| 275 |
+
_clip_processor = CLIPProcessor.from_pretrained(
|
| 276 |
+
"openai/clip-vit-large-patch14", cache_dir=CACHE)
|
| 277 |
+
_clip_model.eval()
|
| 278 |
+
try:
|
| 279 |
+
_backup = pipeline("image-classification",
|
| 280 |
+
model="haywoodsloan/ai-image-detector-deploy",
|
| 281 |
+
cache_dir=CACHE)
|
| 282 |
+
except Exception:
|
| 283 |
+
logger.warning("Backup fingerprint detector unavailable")
|
| 284 |
+
logger.info("Fingerprint models ready")
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class FingerprintEngine:
|
| 288 |
+
|
| 289 |
+
def _ensure(self):
|
| 290 |
+
with _lock:
|
| 291 |
+
_load()
|
| 292 |
+
|
| 293 |
+
def run(self, image: Image.Image) -> EngineResult:
|
| 294 |
+
self._ensure()
|
| 295 |
+
if image.mode != "RGB":
|
| 296 |
+
image = image.convert("RGB")
|
| 297 |
+
|
| 298 |
+
# Binary fake score
|
| 299 |
+
FAKE_LABELS = {"artificial", "fake", "ai-generated", "generated"}
|
| 300 |
+
try:
|
| 301 |
+
preds = _detector(image)
|
| 302 |
+
fake_score = max(
|
| 303 |
+
(p["score"] for p in preds if p["label"].lower() in FAKE_LABELS),
|
| 304 |
+
default=0.5,
|
| 305 |
+
)
|
| 306 |
+
except Exception as e:
|
| 307 |
+
logger.warning(f"Primary detector error: {e}")
|
| 308 |
+
fake_score = 0.5
|
| 309 |
+
|
| 310 |
+
# Ensemble backup
|
| 311 |
+
if _backup is not None:
|
| 312 |
+
try:
|
| 313 |
+
bp = _backup(image)
|
| 314 |
+
bk = max((p["score"] for p in bp
|
| 315 |
+
if p["label"].lower() in FAKE_LABELS), default=0.5)
|
| 316 |
+
fake_score = fake_score * 0.6 + bk * 0.4
|
| 317 |
+
except Exception:
|
| 318 |
+
pass
|
| 319 |
+
|
| 320 |
+
# CLIP zero-shot generator attribution
|
| 321 |
+
generator = "real"
|
| 322 |
+
try:
|
| 323 |
+
texts = list(GENERATOR_PROMPTS.values())
|
| 324 |
+
inputs = _clip_processor(
|
| 325 |
+
text=texts, images=image,
|
| 326 |
+
return_tensors="pt", padding=True, truncation=True,
|
| 327 |
+
)
|
| 328 |
+
with torch.no_grad():
|
| 329 |
+
logits = _clip_model(**inputs).logits_per_image[0]
|
| 330 |
+
probs = logits.softmax(dim=0).numpy()
|
| 331 |
+
generator = list(GENERATOR_PROMPTS.keys())[int(np.argmax(probs))]
|
| 332 |
+
except Exception as e:
|
| 333 |
+
logger.warning(f"CLIP attribution error: {e}")
|
| 334 |
+
|
| 335 |
+
if fake_score > 0.65 and generator == "real":
|
| 336 |
+
generator = "unknown_gan"
|
| 337 |
+
|
| 338 |
+
return EngineResult(
|
| 339 |
+
engine="fingerprint",
|
| 340 |
+
verdict="FAKE" if fake_score > 0.5 else "REAL",
|
| 341 |
+
confidence=float(fake_score),
|
| 342 |
+
attributed_generator=generator,
|
| 343 |
+
explanation=f"Binary score {fake_score:.2f}; attributed to {generator}.",
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
def run_video(self, frames: list) -> EngineResult:
|
| 347 |
+
if not frames:
|
| 348 |
+
return EngineResult(engine="fingerprint", verdict="UNKNOWN",
|
| 349 |
+
confidence=0.5, explanation="No frames.")
|
| 350 |
+
keyframes = frames[::8] or [frames[0]]
|
| 351 |
+
results = [self.run(Image.fromarray(f)) for f in keyframes]
|
| 352 |
+
avg = float(np.mean([r.confidence for r in results]))
|
| 353 |
+
gens = [r.attributed_generator for r in results]
|
| 354 |
+
top_gen = max(set(gens), key=gens.count)
|
| 355 |
+
return EngineResult(
|
| 356 |
+
engine="fingerprint",
|
| 357 |
+
verdict="FAKE" if avg > 0.5 else "REAL",
|
| 358 |
+
confidence=avg,
|
| 359 |
+
attributed_generator=top_gen,
|
| 360 |
+
explanation=f"Keyframe average {avg:.2f} over {len(keyframes)} frames.",
|
| 361 |
+
)
|
| 362 |
+
```
|
| 363 |
+
|
| 364 |
+
---
|
| 365 |
+
|
| 366 |
+
### CoherenceEngine — `src/engines/coherence/engine.py`
|
| 367 |
+
|
| 368 |
+
Query `context7.query-docs mediapipe face_mesh` and
|
| 369 |
+
`context7.query-docs facenet-pytorch InceptionResnetV1` before modifying.
|
| 370 |
+
|
| 371 |
+
```python
|
| 372 |
+
import logging, threading, cv2
|
| 373 |
+
import numpy as np
|
| 374 |
+
from PIL import Image
|
| 375 |
+
from facenet_pytorch import MTCNN, InceptionResnetV1
|
| 376 |
+
import mediapipe as mp
|
| 377 |
+
from src.types import EngineResult
|
| 378 |
+
|
| 379 |
+
logger = logging.getLogger(__name__)
|
| 380 |
+
|
| 381 |
+
_lock = threading.Lock()
|
| 382 |
+
_mtcnn = _resnet = _face_mesh = None
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def _load():
    """Lazily initialize the coherence-engine models (idempotent).

    Populates the module-level singletons ``_mtcnn`` (face detector),
    ``_resnet`` (FaceNet identity embedder) and ``_face_mesh``
    (MediaPipe landmark extractor).  Callers must hold the module lock
    (see ``CoherenceEngine._ensure``); this function itself only checks
    the already-loaded flag.
    """
    global _mtcnn, _resnet, _face_mesh
    # _mtcnn doubles as the "already loaded" sentinel.
    if _mtcnn is not None:
        return
    logger.info("Loading coherence models...")
    _mtcnn = MTCNN(keep_all=False, device="cpu")
    # eval() — inference only, disables dropout/batch-norm updates.
    _resnet = InceptionResnetV1(pretrained="vggface2").eval()
    _face_mesh = mp.solutions.face_mesh.FaceMesh(
        static_image_mode=False, max_num_faces=1,
        refine_landmarks=True, min_detection_confidence=0.5,
    )
    logger.info("Coherence models ready")
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class CoherenceEngine:
    """Face-coherence engine: geometric checks on stills, temporal checks on video.

    Frame contract: every ndarray this engine receives is RGB uint8 —
    ``run`` builds frames via ``PIL.Image.convert("RGB")``, and the video
    callers (API ``_extract_frames`` and the RunPod handler) convert
    decoded BGR frames to RGB before calling ``run_video``.  The earlier
    draft re-applied ``cv2.COLOR_BGR2RGB`` to these already-RGB frames,
    which swapped channels back to BGR before MediaPipe FaceMesh; those
    redundant conversions are removed here.
    """

    def _ensure(self):
        # Serialize lazy model loading across threads.
        with _lock:
            _load()

    def run(self, image: Image.Image) -> EngineResult:
        """Score a single still image via facial-geometry asymmetry heuristics."""
        self._ensure()
        frame = np.array(image.convert("RGB"))
        score = self._image_score(frame)
        return EngineResult(
            engine="coherence",
            verdict="FAKE" if score > 0.5 else "REAL",
            confidence=float(score),
            explanation=f"Geometric coherence anomaly {score:.2f} (image mode).",
        )

    def _image_score(self, frame: np.ndarray) -> float:
        """Return a 0-1 anomaly score from landmark symmetry; 0.35 if no face."""
        # `frame` is already RGB (see class docstring) — feed FaceMesh
        # directly.  The old BGR2RGB call here silently produced BGR.
        res = _face_mesh.process(frame)
        if not res.multi_face_landmarks:
            return 0.35  # no face detected

        lms = res.multi_face_landmarks[0].landmark
        h, w = frame.shape[:2]

        def pt(i):
            # Landmark i in pixel coordinates.
            return np.array([lms[i].x * w, lms[i].y * h])

        # Eye width asymmetry — deepfakes often mismatched
        lew = np.linalg.norm(pt(33) - pt(133))
        rew = np.linalg.norm(pt(362) - pt(263))
        eye_ratio = min(lew, rew) / (max(lew, rew) + 1e-9)
        eye_score = max(0.0, (0.85 - eye_ratio) / 0.3)

        # Ear symmetry from nose tip
        nose = pt(1)
        lr = min(np.linalg.norm(nose - pt(234)), np.linalg.norm(nose - pt(454)))
        rr = max(np.linalg.norm(nose - pt(234)), np.linalg.norm(nose - pt(454)))
        ear_score = max(0.0, (0.90 - lr / (rr + 1e-9)) / 0.2)

        return float(np.clip(eye_score * 0.5 + ear_score * 0.5, 0.0, 1.0))

    def run_video(self, frames: list[np.ndarray]) -> EngineResult:
        """Score a video via temporal identity/landmark/blink coherence."""
        self._ensure()
        if len(frames) < 4:
            # Not enough frames for temporal stats — fall back to image mode.
            r = self.run(Image.fromarray(frames[0]))
            r.explanation = "Too few frames for temporal analysis."
            return r

        delta = self._embedding_variance(frames)
        jerk = self._landmark_jerk(frames)
        blink = self._blink_anomaly(frames)
        score = float(np.clip(delta * 0.45 + jerk * 0.35 + blink * 0.20, 0.0, 1.0))

        return EngineResult(
            engine="coherence",
            verdict="FAKE" if score > 0.5 else "REAL",
            confidence=score,
            explanation=(
                f"Embedding variance {delta:.2f}, "
                f"landmark jerk {jerk:.2f}, "
                f"blink anomaly {blink:.2f}."
            ),
        )

    def _embedding_variance(self, frames: list[np.ndarray]) -> float:
        """Variance of FaceNet identity-embedding jumps between sampled frames.

        High variance suggests the rendered identity drifts frame to
        frame.  Returns 0.5 (undecided) when fewer than two embeddings
        could be extracted.
        """
        import torch
        embeddings = []
        for frame in frames[::4]:
            try:
                face = _mtcnn(Image.fromarray(frame))
                if face is not None:
                    with torch.no_grad():
                        e = _resnet(face.unsqueeze(0)).numpy()[0]
                    embeddings.append(e)
            except Exception:
                continue  # best-effort: skip frames where detection fails
        if len(embeddings) < 2:
            return 0.5
        deltas = [np.linalg.norm(embeddings[i + 1] - embeddings[i])
                  for i in range(len(embeddings) - 1)]
        # *8 scale chosen empirically to map typical variance into 0-1.
        return float(np.clip(np.var(deltas) * 8, 0.0, 1.0))

    def _landmark_jerk(self, frames: list[np.ndarray]) -> float:
        """Third-derivative magnitude of nose-tip motion (face-swap jitter cue)."""
        positions = []
        for frame in frames[::2]:
            # Frames arrive as RGB — no color conversion needed.
            res = _face_mesh.process(frame)
            if res.multi_face_landmarks:
                lm = res.multi_face_landmarks[0].landmark
                positions.append([lm[1].x, lm[1].y])  # landmark 1 = nose tip
        if len(positions) < 4:
            return 0.3  # too few detections to form a 3rd difference
        pos = np.array(positions)
        jerk = np.diff(pos, n=3, axis=0)
        return float(np.clip((np.mean(np.linalg.norm(jerk, axis=1)) - 0.002) / 0.008,
                             0.0, 1.0))

    def _blink_anomaly(self, frames: list[np.ndarray]) -> float:
        """Blink-rate plausibility from the eye-aspect-ratio time series."""
        LEFT_EYE = [33, 160, 158, 133, 153, 144]
        RIGHT_EYE = [362, 385, 387, 263, 373, 380]

        def ear(lms, idx, h, w):
            # Classic eye-aspect-ratio: vertical gaps over horizontal span.
            pts = [np.array([lms[i].x * w, lms[i].y * h]) for i in idx]
            a = np.linalg.norm(pts[1] - pts[5])
            b = np.linalg.norm(pts[2] - pts[4])
            c = np.linalg.norm(pts[0] - pts[3])
            return (a + b) / (2.0 * c + 1e-9)

        ears = []
        for frame in frames:
            # Frames arrive as RGB — no color conversion needed.
            res = _face_mesh.process(frame)
            if res.multi_face_landmarks:
                lm = res.multi_face_landmarks[0].landmark
                h, w = frame.shape[:2]
                ears.append((ear(lm, LEFT_EYE, h, w) + ear(lm, RIGHT_EYE, h, w)) / 2)

        if len(ears) < 10:
            return 0.3
        arr = np.array(ears)
        # A blink = EAR dipping below 0.21; count rising edges of the mask.
        blinks = int(np.sum(np.diff((arr < 0.21).astype(int)) > 0))
        # Blinks-per-minute; assumes ~25 fps source — TODO confirm.
        bpm = blinks / (len(ears) / 25) * 60
        if 8 <= bpm <= 25:
            return 0.15  # physiologically normal blink rate
        if bpm < 3 or bpm > 35:
            return 0.80  # no-blink / flutter — strong deepfake cue
        return 0.45
|
| 528 |
+
```
|
| 529 |
+
|
| 530 |
+
---
|
| 531 |
+
|
| 532 |
+
### SSTGNNEngine — `src/engines/sstgnn/engine.py`
|
| 533 |
+
|
| 534 |
+
Query `context7.query-docs torch-geometric GCNConv` and
|
| 535 |
+
`huggingface model_details dima806/deepfake_vs_real_image_detection` before
|
| 536 |
+
modifying.
|
| 537 |
+
|
| 538 |
+
```python
|
| 539 |
+
import logging, os, threading
|
| 540 |
+
import numpy as np
|
| 541 |
+
import cv2
|
| 542 |
+
from PIL import Image
|
| 543 |
+
from transformers import pipeline
|
| 544 |
+
import mediapipe as mp
|
| 545 |
+
from scipy.spatial import Delaunay
|
| 546 |
+
from src.types import EngineResult
|
| 547 |
+
|
| 548 |
+
logger = logging.getLogger(__name__)
|
| 549 |
+
CACHE = os.environ.get("MODEL_CACHE_DIR", "/tmp/models")
|
| 550 |
+
|
| 551 |
+
_lock = threading.Lock()
|
| 552 |
+
_det1 = _det2 = _mesh = None
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def _load():
    """Lazily initialize the SSTGNN-engine models (idempotent).

    Populates module-level singletons: ``_det1`` (primary HF deepfake
    classifier), ``_det2`` (optional backup classifier; stays ``None``
    if unavailable) and ``_mesh`` (MediaPipe FaceMesh for the geometry
    score).  Callers must hold the module lock.
    """
    global _det1, _det2, _mesh
    # _det1 doubles as the "already loaded" sentinel.
    if _det1 is not None:
        return
    logger.info("Loading SSTGNN models...")
    _det1 = pipeline("image-classification",
                     model="dima806/deepfake_vs_real_image_detection",
                     cache_dir=CACHE)
    # The backup detector is best-effort — the engine degrades gracefully
    # to _det1 alone if this download/load fails.
    try:
        _det2 = pipeline("image-classification",
                         model="prithivMLmods/Deep-Fake-Detector-Model",
                         cache_dir=CACHE)
    except Exception:
        logger.warning("SSTGNN backup detector unavailable")
    _mesh = mp.solutions.face_mesh.FaceMesh(
        static_image_mode=True, max_num_faces=1, refine_landmarks=True)
    logger.info("SSTGNN models ready")
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def _fake_prob(preds: list[dict]) -> float:
|
| 575 |
+
fake_kw = {"fake", "deepfake", "artificial", "generated", "ai"}
|
| 576 |
+
return max(
|
| 577 |
+
(p["score"] for p in preds
|
| 578 |
+
if any(k in p["label"].lower() for k in fake_kw)),
|
| 579 |
+
default=0.5,
|
| 580 |
+
)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
class SSTGNNEngine:
    """CNN-ensemble + facial-graph geometry deepfake engine.

    Frame contract: ndarrays passed in are RGB (``run`` converts via
    PIL; video callers convert BGR→RGB before calling).  The earlier
    draft re-applied ``cv2.COLOR_BGR2RGB`` in ``_geometry_score``, which
    swapped channels back to BGR before MediaPipe; that redundant
    conversion is removed.
    """

    def _ensure(self):
        # Lazy-load models once, thread-safely.
        with _lock:
            _load()

    def run(self, image: Image.Image) -> EngineResult:
        """Classify one image with the detector ensemble plus a geometry prior.

        Fixes a normalization bug in the earlier draft: when only the
        0.4-weighted backup detector succeeded, its score was divided by
        0.6, mis-scaling the CNN probability.  Scores are now normalized
        by the sum of the weights that actually contributed, which is
        identical to the old behavior in the non-degraded cases.
        """
        self._ensure()
        if image.mode != "RGB":
            image = image.convert("RGB")

        weighted: list[tuple[float, float]] = []  # (fake_prob, weight)
        try:
            weighted.append((_fake_prob(_det1(image)), 0.6))
        except Exception as e:
            logger.warning(f"SSTGNN det1 error: {e}")
        if _det2:
            try:
                weighted.append((_fake_prob(_det2(image)), 0.4))
            except Exception as e:
                logger.warning(f"SSTGNN det2 error: {e}")

        if not weighted:
            return EngineResult(engine="sstgnn", verdict="UNKNOWN",
                                confidence=0.5, explanation="All detectors failed.")

        # Weighted mean over whichever detectors actually responded.
        cnn = sum(p * w for p, w in weighted) / sum(w for _, w in weighted)
        graph = self._geometry_score(np.array(image))
        final = float(np.clip(cnn * 0.7 + graph * 0.3, 0.0, 1.0))

        return EngineResult(
            engine="sstgnn",
            verdict="FAKE" if final > 0.5 else "REAL",
            confidence=final,
            explanation=f"CNN {cnn:.2f}, geometric graph anomaly {graph:.2f}.",
        )

    def _geometry_score(self, frame: np.ndarray) -> float:
        """0-1 anomaly score from Delaunay-triangle-area spread of landmarks.

        Returns 0.3 (mild default) when no face is found or any step fails.
        """
        try:
            # `frame` is already RGB — feed FaceMesh directly.
            res = _mesh.process(frame)
            if not res.multi_face_landmarks:
                return 0.3
            h, w = frame.shape[:2]
            lms = res.multi_face_landmarks[0].landmark
            # Subsample the 468-point mesh down to ~68 landmarks.
            idxs = list(range(0, 468, 7))[:68]
            pts = np.array([[lms[i].x * w, lms[i].y * h] for i in idxs])
            tri = Delaunay(pts)
            areas = []
            for s in tri.simplices:
                a, b, c = pts[s]
                areas.append(abs(np.cross(b - a, c - a)) / 2)
            areas = np.array(areas)
            # Coefficient of variation of triangle areas; empirical 0.8/1.5
            # scaling maps typical faces into 0-1.
            cv_score = float(np.std(areas) / (np.mean(areas) + 1e-9))
            return float(np.clip((cv_score - 0.8) / 1.5, 0.0, 1.0))
        except Exception as e:
            logger.warning(f"Geometry score error: {e}")
            return 0.3

    def run_video(self, frames: list[np.ndarray]) -> EngineResult:
        """Average per-frame engine scores over every 6th frame of a video."""
        self._ensure()
        if not frames:
            return EngineResult(engine="sstgnn", verdict="UNKNOWN",
                                confidence=0.5, explanation="No frames.")
        sample = frames[::6] or [frames[0]]
        results = [self.run(Image.fromarray(f)) for f in sample]
        avg = float(np.mean([r.confidence for r in results]))
        return EngineResult(
            engine="sstgnn",
            verdict="FAKE" if avg > 0.5 else "REAL",
            confidence=avg,
            explanation=f"Frame-sampled SSTGNN average {avg:.2f} over {len(sample)} frames.",
        )
|
| 656 |
+
```
|
| 657 |
+
|
| 658 |
+
---
|
| 659 |
+
|
| 660 |
+
## Fusion — `src/fusion/fuser.py`
|
| 661 |
+
|
| 662 |
+
```python
|
| 663 |
+
import numpy as np
|
| 664 |
+
from src.types import EngineResult
|
| 665 |
+
|
| 666 |
+
# Per-engine trust weights for still images.
ENGINE_WEIGHTS = {
    "fingerprint": 0.45,
    "coherence": 0.35,
    "sstgnn": 0.20,
}

# Per-engine trust weights for video (temporal coherence dominates).
ENGINE_WEIGHTS_VIDEO = {
    "fingerprint": 0.30,
    "coherence": 0.50,
    "sstgnn": 0.20,
}

# Lower number = more trusted for generator attribution.
ATTRIBUTION_PRIORITY = {"fingerprint": 1, "sstgnn": 2, "coherence": 3}


def fuse(
    results: list[EngineResult],
    is_video: bool = False,
) -> tuple[str, float, str]:
    """Returns (verdict, confidence, attributed_generator).

    Engines voting FAKE contribute confidence * weight to the fake mass;
    engines voting REAL contribute (1 - confidence) * weight to the real
    mass.  The fused fake probability is the normalized fake mass.
    Generator attribution comes from the highest-priority engine that
    named a non-"real" generator; "unknown_gan" if none did.
    """
    weights = ENGINE_WEIGHTS_VIDEO if is_video else ENGINE_WEIGHTS
    active = [r for r in results if r.verdict != "UNKNOWN"]

    if not active:
        return "UNKNOWN", 0.5, "unknown_gan"

    fake_mass = 0.0
    real_mass = 0.0
    for r in active:
        w = weights.get(r.engine, 0.1)  # 0.1 default for unknown engines
        if r.verdict == "FAKE":
            fake_mass += r.confidence * w
        elif r.verdict == "REAL":
            real_mass += (1 - r.confidence) * w

    fake_prob = float(np.clip(fake_mass / (fake_mass + real_mass + 1e-9), 0.0, 1.0))
    verdict = "FAKE" if fake_prob > 0.5 else "REAL"

    generator = "real"
    if verdict == "FAKE":
        by_priority = sorted(active,
                             key=lambda r: ATTRIBUTION_PRIORITY.get(r.engine, 9))
        generator = next(
            (r.attributed_generator for r in by_priority
             if r.attributed_generator and r.attributed_generator != "real"),
            "unknown_gan",
        )

    return verdict, fake_prob, generator
|
| 710 |
+
```
|
| 711 |
+
|
| 712 |
+
---
|
| 713 |
+
|
| 714 |
+
## API — `src/api/main.py`
|
| 715 |
+
|
| 716 |
+
```python
|
| 717 |
+
import asyncio, io, logging, os, time
|
| 718 |
+
from pathlib import Path
|
| 719 |
+
|
| 720 |
+
import cv2, numpy as np
|
| 721 |
+
from fastapi import FastAPI, File, HTTPException, UploadFile
|
| 722 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 723 |
+
from PIL import Image
|
| 724 |
+
|
| 725 |
+
from src.engines.fingerprint.engine import FingerprintEngine
|
| 726 |
+
from src.engines.coherence.engine import CoherenceEngine
|
| 727 |
+
from src.engines.sstgnn.engine import SSTGNNEngine
|
| 728 |
+
from src.explainability.explainer import explain
|
| 729 |
+
from src.fusion.fuser import fuse
|
| 730 |
+
from src.services.inference_router import route_inference
|
| 731 |
+
from src.types import DetectionResponse
|
| 732 |
+
|
| 733 |
+
logger = logging.getLogger(__name__)
|
| 734 |
+
|
| 735 |
+
app = FastAPI(title="GenAI-DeepDetect", version="1.0.0")
|
| 736 |
+
app.add_middleware(
|
| 737 |
+
CORSMiddleware,
|
| 738 |
+
allow_origins=["*"], allow_methods=["*"], allow_headers=["*"],
|
| 739 |
+
)
|
| 740 |
+
|
| 741 |
+
_fp = FingerprintEngine()
|
| 742 |
+
_co = CoherenceEngine()
|
| 743 |
+
_st = SSTGNNEngine()
|
| 744 |
+
|
| 745 |
+
MAX_MB = int(os.environ.get("MAX_VIDEO_SIZE_MB", 100))
|
| 746 |
+
MAX_FRAMES = int(os.environ.get("MAX_VIDEO_FRAMES", 300))
|
| 747 |
+
|
| 748 |
+
IMAGE_TYPES = {"image/jpeg", "image/png", "image/webp", "image/bmp"}
|
| 749 |
+
VIDEO_TYPES = {"video/mp4", "video/quicktime", "video/x-msvideo", "video/webm"}
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def _extract_frames(path: str) -> list[np.ndarray]:
    """Decode a video into at most MAX_FRAMES evenly sampled RGB frames.

    Returns an empty list when the file cannot be decoded; the caller
    is responsible for treating that as an error.
    """
    cap = cv2.VideoCapture(path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Sampling stride; at least 1 even for short/unknown-length clips.
    step = max(1, total // MAX_FRAMES)

    frames: list[np.ndarray] = []
    index = 0
    while True:
        ok, bgr = cap.read()
        if not ok:
            break
        if index % step == 0:
            # OpenCV decodes BGR; the engines expect RGB.
            frames.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
        index += 1
    cap.release()
    # Hard cap in case frame-count metadata underestimated the length.
    return frames[:MAX_FRAMES]
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
@app.on_event("startup")
async def preload():
    """Warm all three engines at startup so the first request is fast."""
    logger.info("Preloading models...")
    loaders = (_fp._ensure, _co._ensure, _st._ensure)
    # Load concurrently off the event loop; each _ensure is lock-guarded.
    await asyncio.gather(*(asyncio.to_thread(fn) for fn in loaders))
    logger.info("All models preloaded")
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
@app.get("/health")
async def health():
    """Liveness probe: always returns a static OK payload."""
    return {"status": "ok"}
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@app.post("/detect/image", response_model=DetectionResponse)
async def detect_image(file: UploadFile = File(...)):
    """Run all three engines on an uploaded image and fuse the results.

    Raises 400 for unsupported content types, 413 for oversized files,
    and 422 when the payload cannot be decoded as an image (previously
    a corrupt upload surfaced as an unhandled 500).
    """
    t0 = time.monotonic()
    if file.content_type not in IMAGE_TYPES:
        raise HTTPException(400, f"Unsupported type: {file.content_type}")
    data = await file.read()
    if len(data) > MAX_MB * 1024 * 1024:
        raise HTTPException(413, "File too large")

    try:
        image = Image.open(io.BytesIO(data)).convert("RGB")
    except Exception:
        raise HTTPException(422, "Could not decode image")

    # Engines are CPU-bound — run them concurrently off the event loop.
    fp, co, st = await asyncio.gather(
        asyncio.to_thread(_fp.run, image),
        asyncio.to_thread(_co.run, image),
        asyncio.to_thread(_st.run, image),
    )
    ms = (time.monotonic() - t0) * 1000
    # Engines ran concurrently, so each gets the same wall-clock time.
    for r in [fp, co, st]:
        r.processing_time_ms = ms

    verdict, conf, gen = fuse([fp, co, st], is_video=False)
    expl = await asyncio.to_thread(explain, verdict, conf, [fp, co, st], gen)

    return DetectionResponse(
        verdict=verdict, confidence=conf, attributed_generator=gen,
        explanation=expl, processing_time_ms=ms,
        engine_breakdown=[fp, co, st],
    )
|
| 811 |
+
|
| 812 |
+
|
| 813 |
+
@app.post("/detect/video", response_model=DetectionResponse)
async def detect_video(file: UploadFile = File(...)):
    """Run the three engines over sampled video frames and fuse the results.

    Raises 400 for unsupported content types, 413 for oversized files,
    and 422 when no frames can be extracted.  Videos larger than 20 MB
    are delegated to the RunPod GPU endpoint.
    """
    t0 = time.monotonic()
    if file.content_type not in VIDEO_TYPES:
        raise HTTPException(400, f"Unsupported type: {file.content_type}")
    data = await file.read()
    if len(data) > MAX_MB * 1024 * 1024:
        raise HTTPException(413, "File too large")

    # Route heavy videos to RunPod
    if len(data) > 20 * 1024 * 1024:
        return await route_inference(data, "video")

    # Random suffix: a millisecond timestamp alone can collide when two
    # requests land in the same ms, making one request delete the other's
    # temp file.
    tmp = Path(f"/tmp/vid_{int(time.time() * 1000)}_{os.urandom(4).hex()}.mp4")
    tmp.write_bytes(data)
    try:
        frames = await asyncio.to_thread(_extract_frames, str(tmp))
    finally:
        tmp.unlink(missing_ok=True)

    if not frames:
        raise HTTPException(422, "Could not extract frames")

    # Engines are CPU-bound — run them concurrently off the event loop.
    fp, co, st = await asyncio.gather(
        asyncio.to_thread(_fp.run_video, frames),
        asyncio.to_thread(_co.run_video, frames),
        asyncio.to_thread(_st.run_video, frames),
    )
    ms = (time.monotonic() - t0) * 1000
    for r in [fp, co, st]:
        r.processing_time_ms = ms

    verdict, conf, gen = fuse([fp, co, st], is_video=True)
    expl = await asyncio.to_thread(explain, verdict, conf, [fp, co, st], gen)

    return DetectionResponse(
        verdict=verdict, confidence=conf, attributed_generator=gen,
        explanation=expl, processing_time_ms=ms,
        engine_breakdown=[fp, co, st],
    )
|
| 853 |
+
```
|
| 854 |
+
|
| 855 |
+
---
|
| 856 |
+
|
| 857 |
+
## Types — `src/types.py`
|
| 858 |
+
|
| 859 |
+
```python
|
| 860 |
+
from __future__ import annotations
|
| 861 |
+
from typing import Optional
|
| 862 |
+
from pydantic import BaseModel
|
| 863 |
+
|
| 864 |
+
# Canonical generator-attribution labels, keyed by integer class id.
# 0 is the non-generated class; 1 is the catch-all for unattributable fakes.
GENERATOR_LABELS = {
    0: "real",
    1: "unknown_gan",
    2: "stable_diffusion",
    3: "midjourney",
    4: "dall_e",
    5: "flux",
    6: "firefly",
    7: "imagen",
}
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
class EngineResult(BaseModel):
    """Result produced by a single detection engine."""

    engine: str  # engine identifier: "fingerprint" | "coherence" | "sstgnn"
    verdict: str  # FAKE | REAL | UNKNOWN
    confidence: float  # fake probability in 0–1
    attributed_generator: Optional[str] = None  # a GENERATOR_LABELS value, if known
    explanation: str = ""  # short human-readable rationale
    processing_time_ms: float = 0.0  # wall-clock time, filled in by the API layer
|
| 883 |
+
|
| 884 |
+
|
| 885 |
+
class DetectionResponse(BaseModel):
    """Fused API response returned by /detect/image and /detect/video."""

    verdict: str  # fused FAKE | REAL | UNKNOWN
    confidence: float  # fused fake probability in 0–1
    attributed_generator: str  # a GENERATOR_LABELS value
    explanation: str  # narrative explanation (LLM-generated)
    processing_time_ms: float  # end-to-end request wall-clock time
    engine_breakdown: list[EngineResult]  # per-engine results
|
| 892 |
+
```
|
| 893 |
+
|
| 894 |
+
---
|
| 895 |
+
|
| 896 |
+
## Inference Router — `src/services/inference_router.py`
|
| 897 |
+
|
| 898 |
+
```python
|
| 899 |
+
import base64, logging, os
|
| 900 |
+
import httpx
|
| 901 |
+
from src.types import DetectionResponse
|
| 902 |
+
|
| 903 |
+
logger = logging.getLogger(__name__)
|
| 904 |
+
|
| 905 |
+
RUNPOD_KEY = os.environ.get("RUNPOD_API_KEY", "")
|
| 906 |
+
RUNPOD_EID = os.environ.get("RUNPOD_ENDPOINT_ID", "")
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
async def route_inference(data: bytes, media_type: str) -> DetectionResponse:
    """Forward a media payload to the RunPod serverless endpoint synchronously.

    Raises RuntimeError when RunPod credentials are missing, or when the
    job reply carries no "output" (e.g. a failed job); previously a
    failed job raised a bare KeyError.
    """
    if not RUNPOD_KEY or not RUNPOD_EID:
        raise RuntimeError(
            "RunPod not configured. Set RUNPOD_API_KEY and RUNPOD_ENDPOINT_ID."
        )
    url = f"https://api.runpod.ai/v2/{RUNPOD_EID}/runsync"
    payload = {"input": {"data": base64.b64encode(data).decode(),
                         "media_type": media_type}}
    async with httpx.AsyncClient(timeout=120) as client:
        resp = await client.post(url, json=payload,
                                 headers={"Authorization": f"Bearer {RUNPOD_KEY}"})
        resp.raise_for_status()
        body = resp.json()
        output = body.get("output")
        if output is None:
            # runsync replies without "output" when the job did not
            # complete; surface the reported status instead of a KeyError.
            raise RuntimeError(f"RunPod job did not complete: {body.get('status')}")
        return DetectionResponse(**output)
|
| 922 |
+
```
|
| 923 |
+
|
| 924 |
+
---
|
| 925 |
+
|
| 926 |
+
## RunPod Handler — `runpod_handler.py` (project root)
|
| 927 |
+
|
| 928 |
+
```python
|
| 929 |
+
import base64, io, os, tempfile
|
| 930 |
+
import runpod, cv2, numpy as np
|
| 931 |
+
from PIL import Image
|
| 932 |
+
|
| 933 |
+
os.environ.setdefault("MODEL_CACHE_DIR", "/tmp/models")
|
| 934 |
+
|
| 935 |
+
from src.engines.fingerprint.engine import FingerprintEngine
|
| 936 |
+
from src.engines.coherence.engine import CoherenceEngine
|
| 937 |
+
from src.engines.sstgnn.engine import SSTGNNEngine
|
| 938 |
+
from src.explainability.explainer import explain
|
| 939 |
+
from src.fusion.fuser import fuse
|
| 940 |
+
|
| 941 |
+
_fp = FingerprintEngine()
|
| 942 |
+
_co = CoherenceEngine()
|
| 943 |
+
_st = SSTGNNEngine()
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
def handler(job: dict) -> dict:
    """RunPod serverless entrypoint: decode payload, run engines, fuse.

    Expects ``job["input"]`` to contain ``data`` (base64-encoded media
    bytes) and ``media_type`` ("image" or "video", defaulting to
    "image").  Returns a plain dict shaped like DetectionResponse.
    """
    inp = job["input"]
    raw = base64.b64decode(inp["data"])
    media_type = inp.get("media_type", "image")

    if media_type == "image":
        image = Image.open(io.BytesIO(raw)).convert("RGB")
        fp = _fp.run(image)
        co = _co.run(image)
        st = _st.run(image)
        verdict, conf, gen = fuse([fp, co, st], is_video=False)
    else:
        # Persist to a temp file so OpenCV can open it by path.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as f:
            f.write(raw)
            tmp = f.name
        try:
            cap = cv2.VideoCapture(tmp)
            frames, i = [], 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                if i % 4 == 0:  # sample every 4th frame
                    # OpenCV decodes BGR; engines expect RGB.
                    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                i += 1
            cap.release()
        finally:
            os.unlink(tmp)
        fp = _fp.run_video(frames)
        co = _co.run_video(frames)
        st = _st.run_video(frames)
        verdict, conf, gen = fuse([fp, co, st], is_video=True)

    expl = explain(verdict, conf, [fp, co, st], gen)

    return {
        "verdict": verdict,
        "confidence": conf,
        "attributed_generator": gen,
        "explanation": expl,
        # Timing is measured by the caller, not inside the handler.
        "processing_time_ms": 0.0,
        "engine_breakdown": [r.model_dump() for r in [fp, co, st]],
    }
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
runpod.serverless.start({"handler": handler})
|
| 992 |
+
```
|
| 993 |
+
|
| 994 |
+
---
|
| 995 |
+
|
| 996 |
+
## Hosting
|
| 997 |
+
|
| 998 |
+
### Option A — HuggingFace Spaces (Free, CPU, primary API host)
|
| 999 |
+
|
| 1000 |
+
**`spaces/app.py`**:
|
| 1001 |
+
|
| 1002 |
+
```python
|
| 1003 |
+
import os
|
| 1004 |
+
os.environ.setdefault("MODEL_CACHE_DIR", "/data/models")
|
| 1005 |
+
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
|
| 1006 |
+
|
| 1007 |
+
import uvicorn
|
| 1008 |
+
from src.api.main import app
|
| 1009 |
+
|
| 1010 |
+
if __name__ == "__main__":
|
| 1011 |
+
uvicorn.run(app, host="0.0.0.0", port=7860, workers=1)
|
| 1012 |
+
```
|
| 1013 |
+
|
| 1014 |
+
**Root `README.md`** front-matter (Hugging Face reads this file):
|
| 1015 |
+
|
| 1016 |
+
```yaml
|
| 1017 |
+
---
|
| 1018 |
+
title: GenAI DeepDetect
|
| 1019 |
+
emoji: "🔍"
|
| 1020 |
+
colorFrom: gray
|
| 1021 |
+
colorTo: indigo
|
| 1022 |
+
sdk: docker
|
| 1023 |
+
app_port: 7860
|
| 1024 |
+
pinned: false
|
| 1025 |
+
---
|
| 1026 |
+
```
|
| 1027 |
+
|
| 1028 |
+
**`Dockerfile`** (replace existing):
|
| 1029 |
+
|
| 1030 |
+
```dockerfile
|
| 1031 |
+
FROM python:3.11-slim

# python:3.11-slim is Debian bookworm, where the transitional
# libgl1-mesa-glx package no longer exists (apt-get would fail) —
# install libgl1 instead for OpenCV, plus ffmpeg and the X11/glib
# runtime libraries MediaPipe needs.
RUN apt-get update && apt-get install -y \
    ffmpeg libgl1 libglib2.0-0 libsm6 libxext6 libxrender-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
# Copy requirements first so dependency layers cache across code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# /data is the HF Spaces persistent volume; models cached there survive restarts.
ENV MODEL_CACHE_DIR=/data/models
ENV TOKENIZERS_PARALLELISM=false
ENV PYTHONUNBUFFERED=1

# Must match app_port in the HF Spaces README front-matter.
EXPOSE 7860
CMD ["python", "spaces/app.py"]
|
| 1049 |
+
```
|
| 1050 |
+
|
| 1051 |
+
**Secrets to set in HF Spaces** (Settings → Repository secrets):
|
| 1052 |
+
|
| 1053 |
+
```
|
| 1054 |
+
GEMINI_API_KEY
|
| 1055 |
+
HF_TOKEN
|
| 1056 |
+
RUNPOD_API_KEY
|
| 1057 |
+
RUNPOD_ENDPOINT_ID
|
| 1058 |
+
```
|
| 1059 |
+
|
| 1060 |
+
**Free tier**: 2 vCPU, 16GB RAM, persistent `/data` volume. Models cache to
|
| 1061 |
+
`/data/models` and survive container restarts. Cold start first request: ~90s.
|
| 1062 |
+
Warm: <5s. GPU upgrade: T4 at $0.05/hr if needed.
|
| 1063 |
+
|
| 1064 |
+
---
|
| 1065 |
+
|
| 1066 |
+
### Option B — RunPod Serverless (GPU, heavy video, low cost)
|
| 1067 |
+
|
| 1068 |
+
1. RunPod → Serverless → New Endpoint
|
| 1069 |
+
2. Select template: `runpod/pytorch:2.1.0-py3.10-cuda11.8.0-devel-ubuntu22.04`
|
| 1070 |
+
3. Set handler file: `runpod_handler.py`
|
| 1071 |
+
4. Min replicas: 0, Max: 3
|
| 1072 |
+
5. GPU: RTX 3090 or A40 (cheapest that works)
|
| 1073 |
+
6. Set env vars: `GEMINI_API_KEY`, `HF_TOKEN`, `MODEL_CACHE_DIR=/tmp/models`
|
| 1074 |
+
|
| 1075 |
+
**Cost**: ~$0.0002/request on H100. Billed per second. Min workers = 0 means you
|
| 1076 |
+
pay nothing when idle — cold start is ~15s.
|
| 1077 |
+
|
| 1078 |
+
**When it triggers**: `inference_router.py` automatically sends videos >20MB to
|
| 1079 |
+
RunPod. Images always run on HF Spaces.
|
| 1080 |
+
|
| 1081 |
+
---
|
| 1082 |
+
|
| 1083 |
+
## Frontend — `frontend/lib/api.ts`
|
| 1084 |
+
|
| 1085 |
+
```typescript
|
| 1086 |
+
const BASE_URL =
|
| 1087 |
+
process.env.NEXT_PUBLIC_API_URL ??
|
| 1088 |
+
'https://YOUR-USERNAME-genai-deepdetect.hf.space';
|
| 1089 |
+
|
| 1090 |
+
export type GeneratorLabel =
|
| 1091 |
+
| 'real'
|
| 1092 |
+
| 'unknown_gan'
|
| 1093 |
+
| 'stable_diffusion'
|
| 1094 |
+
| 'midjourney'
|
| 1095 |
+
| 'dall_e'
|
| 1096 |
+
| 'flux'
|
| 1097 |
+
| 'firefly'
|
| 1098 |
+
| 'imagen';
|
| 1099 |
+
|
| 1100 |
+
export interface EngineResult {
|
| 1101 |
+
engine: string;
|
| 1102 |
+
verdict: 'FAKE' | 'REAL' | 'UNKNOWN';
|
| 1103 |
+
confidence: number;
|
| 1104 |
+
attributed_generator: GeneratorLabel | null;
|
| 1105 |
+
explanation: string;
|
| 1106 |
+
processing_time_ms: number;
|
| 1107 |
+
}
|
| 1108 |
+
|
| 1109 |
+
export interface DetectionResponse {
|
| 1110 |
+
verdict: 'FAKE' | 'REAL' | 'UNKNOWN';
|
| 1111 |
+
confidence: number;
|
| 1112 |
+
attributed_generator: GeneratorLabel;
|
| 1113 |
+
explanation: string;
|
| 1114 |
+
processing_time_ms: number;
|
| 1115 |
+
engine_breakdown: EngineResult[];
|
| 1116 |
+
}
|
| 1117 |
+
|
| 1118 |
+
// POST a file as multipart/form-data to the detection API and parse the
// JSON response. Throws an Error carrying the HTTP status and response
// body text on any non-2xx reply.
async function _post(endpoint: string, file: File): Promise<DetectionResponse> {
  const form = new FormData();
  form.append('file', file);
  // No explicit Content-Type header: the browser sets the multipart
  // boundary itself when given a FormData body.
  const res = await fetch(`${BASE_URL}${endpoint}`, {
    method: 'POST',
    body: form,
  });
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`Detection failed (${res.status}): ${err}`);
  }
  return res.json();
}
|
| 1131 |
+
|
| 1132 |
+
export const detectImage = (file: File) => _post('/detect/image', file);
|
| 1133 |
+
export const detectVideo = (file: File) => _post('/detect/video', file);
|
| 1134 |
+
```
|
| 1135 |
+
|
| 1136 |
+
Set in `frontend/.env.local`:
|
| 1137 |
+
|
| 1138 |
+
```
|
| 1139 |
+
NEXT_PUBLIC_API_URL=https://your-username-genai-deepdetect.hf.space
|
| 1140 |
+
```
|
| 1141 |
+
|
| 1142 |
+
---
|
| 1143 |
+
|
| 1144 |
+
## Dependencies — `requirements.txt`
|
| 1145 |
+
|
| 1146 |
+
```
|
| 1147 |
+
# API
|
| 1148 |
+
fastapi>=0.111.0
|
| 1149 |
+
uvicorn[standard]>=0.29.0
|
| 1150 |
+
python-multipart>=0.0.9
|
| 1151 |
+
aiofiles>=23.2.1
|
| 1152 |
+
httpx>=0.27.0
|
| 1153 |
+
pydantic>=2.7.0
|
| 1154 |
+
|
| 1155 |
+
# ML — fingerprint
|
| 1156 |
+
transformers>=4.40.0
|
| 1157 |
+
timm>=1.0.0
|
| 1158 |
+
torch>=2.1.0
|
| 1159 |
+
torchvision>=0.16.0
|
| 1160 |
+
|
| 1161 |
+
# ML — coherence
|
| 1162 |
+
facenet-pytorch>=2.5.3
|
| 1163 |
+
mediapipe>=0.10.14
|
| 1164 |
+
opencv-python-headless>=4.9.0
|
| 1165 |
+
|
| 1166 |
+
# ML — sstgnn
|
| 1167 |
+
torch-geometric>=2.5.0
|
| 1168 |
+
scipy>=1.13.0
|
| 1169 |
+
|
| 1170 |
+
# Explainability — Gemini
|
| 1171 |
+
google-generativeai>=0.8.0
|
| 1172 |
+
|
| 1173 |
+
# HuggingFace
|
| 1174 |
+
huggingface-hub>=0.23.0
|
| 1175 |
+
|
| 1176 |
+
# RunPod serverless handler
|
| 1177 |
+
runpod>=1.6.0
|
| 1178 |
+
|
| 1179 |
+
# Continual learning
|
| 1180 |
+
apscheduler>=3.10.4
|
| 1181 |
+
|
| 1182 |
+
# Utils
|
| 1183 |
+
Pillow>=10.3.0
|
| 1184 |
+
numpy>=1.26.0
|
| 1185 |
+
```
|
| 1186 |
+
|
| 1187 |
+
---
|
| 1188 |
+
|
| 1189 |
+
## Bug Checklist — Fix Before Running
|
| 1190 |
+
|
| 1191 |
+
### `src/types.py`
|
| 1192 |
+
|
| 1193 |
+
- [ ] `EngineResult` missing `attributed_generator: Optional[str] = None` — add
|
| 1194 |
+
it
|
| 1195 |
+
- [ ] `DetectionResponse.engine_breakdown` typed as `list[dict]` — change to
|
| 1196 |
+
`list[EngineResult]`
|
| 1197 |
+
|
| 1198 |
+
### `src/fusion/fuser.py`
|
| 1199 |
+
|
| 1200 |
+
- [ ] `fuse()` returns 2-tuple — update to return 3-tuple
|
| 1201 |
+
`(verdict, conf, generator)`
|
| 1202 |
+
- [ ] Update all callers in `main.py` accordingly
|
| 1203 |
+
|
| 1204 |
+
### `src/explainability/explainer.py`
|
| 1205 |
+
|
| 1206 |
+
- [ ] References `anthropic` SDK — replace entirely with Gemini implementation
|
| 1207 |
+
above
|
| 1208 |
+
|
| 1209 |
+
### `src/api/main.py`
|
| 1210 |
+
|
| 1211 |
+
- [ ] Missing CORS middleware — add before deploy
|
| 1212 |
+
- [ ] Missing `@app.on_event("startup")` preload — add it
|
| 1213 |
+
- [ ] Missing `_extract_frames()` for video — add it
|
| 1214 |
+
- [ ] `detect_video` likely missing or stubbed — implement fully
|
| 1215 |
+
|
| 1216 |
+
### `src/engines/*/` directories
|
| 1217 |
+
|
| 1218 |
+
- [ ] All three engine files are stubs or empty — replace with full code above
|
| 1219 |
+
|
| 1220 |
+
### `spaces/app.py`
|
| 1221 |
+
|
| 1222 |
+
- [ ] Likely empty — add uvicorn entrypoint
|
| 1223 |
+
|
| 1224 |
+
### `Dockerfile`
|
| 1225 |
+
|
| 1226 |
+
- [ ] Check for `ffmpeg` and `libgl1-mesa-glx` — required for MediaPipe + OpenCV
|
| 1227 |
+
- [ ] Check `EXPOSE 7860` matches HF Spaces `app_port`
|
| 1228 |
+
|
| 1229 |
+
### `src/services/inference_router.py`
|
| 1230 |
+
|
| 1231 |
+
- [ ] Likely stub — implement `route_inference()` with RunPod httpx call
|
| 1232 |
+
|
| 1233 |
+
---
|
| 1234 |
+
|
| 1235 |
+
## Code Standards
|
| 1236 |
+
|
| 1237 |
+
- Lazy-load all models behind a threading lock — never load at module import
|
| 1238 |
+
- Wrap all model inference in `asyncio.to_thread()` — never block the event loop
|
| 1239 |
+
- Type hints on every function
|
| 1240 |
+
- `logging.getLogger(__name__)` not `print()`
|
| 1241 |
+
- `os.environ.get()` not hardcoded secrets
|
| 1242 |
+
- Pydantic `BaseModel` for all response schemas
|
| 1243 |
+
- Next.js: pages router only — no `app/` dir, no `src/` dir
|
| 1244 |
+
- Font: Plus Jakarta Sans or DM Sans — never Inter, Roboto, Arial
|
| 1245 |
+
- Border radius: 22% icon containers, 18px cards, 12px buttons
|
| 1246 |
+
|
| 1247 |
+
---
|
| 1248 |
+
|
| 1249 |
+
## MCP Usage Rules
|
| 1250 |
+
|
| 1251 |
+
Every coding session must follow these rules:
|
| 1252 |
+
|
| 1253 |
+
```
|
| 1254 |
+
1. Adding a dependency?
|
| 1255 |
+
→ context7: resolve-library-id <package>
|
| 1256 |
+
→ context7: query-docs <package> <specific feature>
|
| 1257 |
+
|
| 1258 |
+
2. Using any HF model?
|
| 1259 |
+
→ huggingface: model_details <model-id>
|
| 1260 |
+
→ confirm size, license, task, input format
|
| 1261 |
+
|
| 1262 |
+
3. Modifying engine logic?
|
| 1263 |
+
→ context7: query-docs transformers pipeline (fingerprint)
|
| 1264 |
+
→ context7: query-docs mediapipe face_mesh (coherence)
|
| 1265 |
+
→ context7: query-docs torch-geometric GCNConv (sstgnn)
|
| 1266 |
+
→ context7: query-docs facenet-pytorch (coherence embeddings)
|
| 1267 |
+
|
| 1268 |
+
4. Modifying Gemini calls?
|
| 1269 |
+
→ context7: query-docs google-generativeai GenerativeModel
|
| 1270 |
+
|
| 1271 |
+
5. Modifying RunPod handler?
|
| 1272 |
+
→ context7: query-docs runpod serverless handler
|
| 1273 |
+
|
| 1274 |
+
6. Modifying FastAPI routes?
|
| 1275 |
+
→ context7: query-docs fastapi UploadFile
|
| 1276 |
+
|
| 1277 |
+
7. Frontend API changes?
|
| 1278 |
+
→ context7: query-docs next.js pages-router fetch
|
| 1279 |
+
```
|
| 1280 |
+
|
| 1281 |
+
---
|
| 1282 |
+
|
| 1283 |
+
## Friday Deploy Checklist
|
| 1284 |
+
|
| 1285 |
+
```
|
| 1286 |
+
[ ] pip install -r requirements.txt (no errors)
|
| 1287 |
+
[ ] src/types.py — EngineResult has attributed_generator
|
| 1288 |
+
[ ] src/types.py — DetectionResponse has engine_breakdown: list[EngineResult]
|
| 1289 |
+
[ ] src/fusion/fuser.py — returns 3-tuple
|
| 1290 |
+
[ ] src/explainability/explainer.py — uses Gemini, no anthropic import
|
| 1291 |
+
[ ] src/engines/fingerprint/engine.py — full implementation
|
| 1292 |
+
[ ] src/engines/coherence/engine.py — full implementation
|
| 1293 |
+
[ ] src/engines/sstgnn/engine.py — full implementation
|
| 1294 |
+
[ ] src/api/main.py — CORS + startup preload + video route
|
| 1295 |
+
[ ] src/services/inference_router.py — RunPod httpx call
|
| 1296 |
+
[ ] runpod_handler.py — added to project root
|
| 1297 |
+
[ ] spaces/app.py — uvicorn entrypoint
|
| 1298 |
+
[ ] Dockerfile — has ffmpeg, libgl1, EXPOSE 7860
|
| 1299 |
+
[ ] HF Space created + secrets set + pushed
|
| 1300 |
+
[ ] RunPod endpoint deployed + endpoint ID noted
|
| 1301 |
+
[ ] frontend/.env.local — NEXT_PUBLIC_API_URL points to HF Space
|
| 1302 |
+
[ ] Vercel deploy of frontend/
|
| 1303 |
+
|
| 1304 |
+
Smoke tests:
|
| 1305 |
+
[ ] GET /health → {"status":"ok"}
|
| 1306 |
+
[ ] POST /detect/image (real JPEG) → verdict REAL
|
| 1307 |
+
[ ] POST /detect/image (AI PNG) → verdict FAKE
|
| 1308 |
+
[ ] POST /detect/video (MP4 <20MB) → response within 30s
|
| 1309 |
+
[ ] POST /detect/video (MP4 >20MB) → routes to RunPod
|
| 1310 |
+
```
|
| 1311 |
+
|
Dockerfile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Dockerfile — Hugging Face Spaces (Docker SDK) image for the FastAPI detection service.
FROM python:3.11-slim

# System deps: ffmpeg for video decoding; OpenGL/X11 runtime libs for OpenCV.
# FIX: python:3.11-slim is Debian bookworm, where the transitional
# libgl1-mesa-glx package no longer exists — install libgl1 instead.
# libxrender-dev (build headers) replaced by the runtime lib libxrender1.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg libgl1 libglib2.0-0 libsm6 libxext6 libxrender1 \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
# Install Python deps before copying sources so the layer cache
# survives code-only edits.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

ENV MODEL_CACHE_DIR=/data/models
ENV TOKENIZERS_PARALLELISM=false
ENV PYTHONUNBUFFERED=1

# HF Spaces routes external traffic to port 7860.
EXPOSE 7860
CMD ["python", "spaces/app.py"]
|
README.md
CHANGED
|
@@ -1,12 +1,19 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
-
short_description: A
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: GenAI DeepDetect
|
| 3 |
+
emoji: "🕵️"
|
| 4 |
+
colorFrom: gray
|
| 5 |
+
colorTo: indigo
|
| 6 |
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
pinned: false
|
|
|
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# GenAI-DeepDetect
|
| 12 |
+
|
| 13 |
+
Docker-based Hugging Face Space for multimodal deepfake detection.
|
| 14 |
+
|
| 15 |
+
This Space runs the FastAPI service from `src/api/main.py` and exposes:
|
| 16 |
+
|
| 17 |
+
- `GET /health`
|
| 18 |
+
- `POST /detect/image`
|
| 19 |
+
- `POST /detect/video`
|
configs/coherence_baseline.yaml
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# configs/coherence_baseline.yaml
# Training config for the Coherence (lip-audio sync) engine — Phase 2

engine: coherence

# Kaggle dataset slugs to download before preprocessing.
kaggle_datasets:
  - xhlulu/faceforensics-in-compressed-videos
  - reubensinclair/celeb-df

kaggle_competitions:
  - deepfake-detection-challenge  # DFDC — requires competition acceptance

data:
  processed_dir: /kaggle/working/processed/coherence
  # Input mount points as they appear inside a Kaggle notebook session.
  sources:
    - mount: /kaggle/input/faceforensics-in-compressed-videos
    - mount: /kaggle/input/deepfake-detection-challenge
    - mount: /kaggle/input/celeb-df

preprocess_args:
  workers: 4
  max_clips: 20  # presumably caps clips extracted per source video — confirm in the preprocessing script

# Two-stream architecture: truncated visual CNN + 1-D audio CNN,
# projected into a shared embedding space.
model:
  visual_stream: resnet18_truncated_layer3
  audio_stream: 1d_cnn
  embed_dim: 256
  dropout: 0.3

train_args:
  data_dir: /kaggle/working/processed/coherence
  output_dir: /kaggle/working/checkpoints/coherence
  epochs: 25
  batch_size: 16
  lr: 1.0e-4
  weight_decay: 1.0e-4
  contrastive_weight: 0.1  # weight of the contrastive loss term relative to the main loss
  amp: true
  seed: 42
  patience: 5  # early-stopping patience in epochs without val improvement

# Acceptance thresholds for a trained checkpoint.
targets:
  val_auc: 0.88
  eer: 0.15
|
configs/fingerprint_baseline.yaml
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# configs/fingerprint_baseline.yaml
# Training config for the Fingerprint (ViT) engine — Phase 1

engine: fingerprint

kaggle_datasets:
  - xhlulu/140k-real-and-fake-faces
  # NOTE(review): "datasaet" appears to be the upstream slug spelling —
  # verify on Kaggle before "correcting" it; the slug must match exactly.
  - philosopher0/ai-generated-vs-real-images-datasaet

kaggle_competitions: []

data:
  processed_dir: /kaggle/working/processed/fingerprint
  # Each source pairs its Kaggle slug with its notebook mount point.
  sources:
    - slug: xhlulu/140k-real-and-fake-faces
      mount: /kaggle/input/140k-real-and-fake-faces
    - slug: philosopher0/ai-generated-vs-real-images-datasaet
      mount: /kaggle/input/ai-generated-vs-real-images-datasaet

model:
  name: vit_base_patch16_224
  pretrained: true
  num_binary_classes: 2     # real vs fake head
  num_generator_classes: 8  # must match GENERATOR_CLASSES in src/training/config.py

train_args:
  data_dir: /kaggle/working/processed/fingerprint
  output_dir: /kaggle/working/checkpoints/fingerprint
  epochs: 30
  batch_size: 64
  lr: 2.0e-5
  weight_decay: 0.01
  warmup_steps: 500
  grad_accum: 2  # gradient accumulation — effective batch = batch_size * grad_accum
  amp: true
  seed: 42
  patience: 5  # early-stopping patience in epochs

# Reduced settings for the quick demo run.
demo_args:
  epochs: 5
  batch_size: 64

# Acceptance thresholds for a trained checkpoint.
targets:
  val_auc: 0.92
  test_auc: 0.90
  cross_dataset_auc: 0.80
|
configs/sstgnn_baseline.yaml
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# configs/sstgnn_baseline.yaml
# Training config for the SSTGNN (graph attention) engine — Phase 3
#
# IMPORTANT: use T4x2 accelerator, NOT P100.
# Do NOT set amp: true — mixed precision causes NaN in GAT layers.

engine: sstgnn

kaggle_datasets:
  - xhlulu/faceforensics-in-compressed-videos
  - reubensinclair/celeb-df

kaggle_competitions:
  - deepfake-detection-challenge

data:
  processed_dir: /kaggle/working/processed/sstgnn

# Facial-landmark windowing that produces the graph-sequence inputs.
landmark_args:
  window_frames: 64
  window_stride: 32  # half-window stride -> 50% overlap between windows
  num_landmarks: 68
  min_detection_confidence: 0.3  # FIX: was 0.5 — too strict for DFDC low-res
  min_tracking_confidence: 0.3
  min_video_height: 240  # skip videos below this resolution

model:
  in_channels: 5     # per-landmark input feature channels
  hidden_dim: 64
  heads: 4           # attention heads per GAT layer
  num_gat_layers: 3
  dropout: 0.3
  out_dropout: 0.4   # extra dropout before the classification head

train_args:
  data_dir: /kaggle/working/processed/sstgnn
  output_dir: /kaggle/working/checkpoints/sstgnn
  epochs: 40
  batch_size: 8
  lr: 5.0e-4
  weight_decay: 5.0e-4
  scheduler: cosine
  seed: 42
  patience: 8  # early-stopping patience in epochs
  amp: false  # NEVER set true for SSTGNN

# Acceptance thresholds for a trained checkpoint.
targets:
  val_auc: 0.85
  test_auc: 0.83
|
frontend/Dockerfile
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# frontend/Dockerfile — three-stage Next.js build.

# Stage 1 — install deps
# Separate stage so dependency install is cached independently of source edits.
FROM node:20-alpine AS deps
WORKDIR /app
# package-lock.json* tolerates a missing lockfile, but `npm ci` requires one.
COPY package.json package-lock.json* ./
RUN npm ci

# Stage 2 — build
FROM node:20-alpine AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN npm run build

# Stage 3 — runtime
# Only build output + runtime deps are carried into the final image.
FROM node:20-alpine AS runner
WORKDIR /app
ENV NODE_ENV=production

COPY --from=builder /app/.next ./.next
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package.json ./package.json
COPY --from=builder /app/public ./public

EXPOSE 3000
# `npm start` runs `next start` (standard Next.js production server).
CMD ["npm", "start"]
|
frontend/components/ui/background-paths.tsx
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
'use client'

// Decorative full-bleed background: an SVG line grid masked by a radial
// gradient so it fades toward the edges. Purely presentational
// (pointer-events disabled, hidden from assistive tech).

interface Props {
  // Overall layer opacity; kept very low so foreground content stays readable.
  opacity?: number
}

export default function BackgroundPaths({ opacity = 0.04 }: Props) {
  return (
    <div
      className="absolute inset-0 pointer-events-none overflow-hidden"
      aria-hidden="true"
      style={{ opacity }}
    >
      <svg
        className="w-full h-full"
        xmlns="http://www.w3.org/2000/svg"
        preserveAspectRatio="xMidYMid slice"
      >
        <defs>
          {/* 48px square cell: one path draws the top and left grid lines. */}
          <pattern
            id="grid-pattern"
            width="48"
            height="48"
            patternUnits="userSpaceOnUse"
          >
            <path
              d="M 48 0 L 0 0 0 48"
              fill="none"
              stroke="#f2f2f2"
              strokeWidth="0.5"
            />
          </pattern>
          {/* Radial falloff used as a luminance mask: opaque center, transparent rim. */}
          <radialGradient id="fade-radial" cx="50%" cy="50%" r="50%">
            <stop offset="0%" stopColor="#f2f2f2" stopOpacity="1" />
            <stop offset="100%" stopColor="#f2f2f2" stopOpacity="0" />
          </radialGradient>
          <mask id="fade-mask">
            <rect width="100%" height="100%" fill="url(#fade-radial)" />
          </mask>
        </defs>
        {/* Grid fill, masked so it fades out radially. */}
        <rect
          width="100%"
          height="100%"
          fill="url(#grid-pattern)"
          mask="url(#fade-mask)"
        />
      </svg>
    </div>
  )
}
|
frontend/components/ui/confidence-ring.tsx
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { motion } from 'framer-motion'
import type { Verdict } from '@/types/detection'
import { formatConfidence } from '@/lib/utils'

// Animated circular gauge for the overall detection confidence,
// tinted red for FAKE and green for REAL.
interface Props {
  // Confidence as a 0..1 fraction — presumably clamped upstream; confirm.
  confidence: number
  verdict: Verdict
}

const RADIUS = 36
const CIRCUMFERENCE = 2 * Math.PI * RADIUS

export default function ConfidenceRing({ confidence, verdict }: Props) {
  // stroke-dashoffset trick: offsetting by the "missing" fraction of the
  // circumference leaves an arc proportional to `confidence` visible.
  const dashOffset = CIRCUMFERENCE * (1 - confidence)
  const color = verdict === 'FAKE' ? '#ef4444' : '#22c55e'
  const bgColor = verdict === 'FAKE' ? 'rgba(239,68,68,0.1)' : 'rgba(34,197,94,0.08)'

  return (
    <div
      className="bg-brand-surface border border-brand-borderHi rounded-lg px-4 py-3.5 flex items-center gap-4"
      style={{ background: bgColor }}
    >
      {/* SVG ring */}
      <div className="relative shrink-0">
        <svg width="88" height="88" viewBox="0 0 88 88">
          {/* Track */}
          <circle
            cx="44" cy="44" r={RADIUS}
            fill="none"
            stroke="rgba(255,255,255,0.06)"
            strokeWidth="5"
          />
          {/* Progress — animates from empty to the target arc on mount. */}
          <motion.circle
            cx="44" cy="44" r={RADIUS}
            fill="none"
            stroke={color}
            strokeWidth="5"
            strokeLinecap="round"
            strokeDasharray={CIRCUMFERENCE}
            initial={{ strokeDashoffset: CIRCUMFERENCE }}
            animate={{ strokeDashoffset: dashOffset }}
            transition={{ duration: 0.7, ease: 'easeOut' }}
            // Rotate -90° about the center so the arc starts at 12 o'clock
            // instead of SVG's default 3 o'clock.
            style={{ transform: 'rotate(-90deg)', transformOrigin: '50% 50%' }}
          />
        </svg>
        {/* Percentage readout centered inside the ring. */}
        <div className="absolute inset-0 flex flex-col items-center justify-center">
          <span className="font-mono text-[13px] font-medium text-brand-primary">
            {Math.round(confidence * 100)}%
          </span>
        </div>
      </div>

      <div>
        <div className="font-mono text-[9px] tracking-[0.12em] uppercase text-brand-muted mb-1">
          Confidence
        </div>
        <div className="text-[22px] font-light tracking-[-0.02em] text-brand-primary leading-none">
          {formatConfidence(confidence)}
        </div>
        <div className="text-[11px] text-brand-secondary mt-0.5">
          overall {verdict.toLowerCase()} confidence
        </div>
      </div>
    </div>
  )
}
|
frontend/components/ui/engine-breakdown.tsx
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { motion } from 'framer-motion'
import type { EngineResult } from '@/types/detection'
import VerdictBadge from './verdict-badge'
import { ENGINE_LABELS } from '@/lib/constants'
import { formatMs } from '@/lib/utils'

// Per-engine result rows: name, verdict badge (or "VIDEO ONLY"),
// animated confidence bar, score, and processing time.
interface Props {
  engines: EngineResult[]
}

export default function EngineBreakdown({ engines }: Props) {
  return (
    <div className="flex flex-col gap-2.5">
      {engines.map((engine, i) => {
        // NOTE(review): an explanation starting with 'N/A' marks an engine
        // that did not run for this media type — confirm this string
        // contract with the backend before relying on it elsewhere.
        const isNA = engine.explanation.startsWith('N/A')
        return (
          <motion.div
            key={engine.engine}
            initial={{ opacity: 0, y: 8 }}
            animate={{ opacity: 1, y: 0 }}
            // Stagger rows by index for a cascading entrance.
            transition={{ delay: i * 0.08, duration: 0.35, ease: 'easeOut' }}
            className="bg-brand-surface border border-brand-borderHi rounded-lg px-4 py-3 flex items-center gap-3"
          >
            {/* Engine name — falls back to the raw id if no label is mapped. */}
            <span className="font-mono text-[9px] tracking-[0.12em] uppercase text-brand-secondary w-24 shrink-0">
              {ENGINE_LABELS[engine.engine] ?? engine.engine.toUpperCase()}
            </span>

            {/* Verdict badge or NA */}
            {isNA ? (
              <span className="font-mono text-[9px] tracking-[0.08em] text-brand-muted border border-brand-borderHi rounded-[20px] px-2.5 py-1">
                VIDEO ONLY
              </span>
            ) : (
              <VerdictBadge verdict={engine.verdict} />
            )}

            {/* Confidence bar — grows to the confidence percentage. */}
            <div className="flex-1 min-w-0">
              {!isNA && (
                <div className="h-[3px] bg-[#1a1a1a] rounded-full overflow-hidden">
                  <motion.div
                    className={engine.verdict === 'FAKE' ? 'h-full bg-[#ef4444]/50 rounded-full' : 'h-full bg-[#22c55e]/50 rounded-full'}
                    initial={{ width: 0 }}
                    animate={{ width: `${engine.confidence * 100}%` }}
                    transition={{ duration: 0.6, delay: i * 0.08 + 0.1, ease: 'easeOut' }}
                  />
                </div>
              )}
            </div>

            {/* Score */}
            <span className="font-mono text-[11px] text-brand-primary w-10 text-right shrink-0">
              {isNA ? '—' : `${Math.round(engine.confidence * 100)}%`}
            </span>

            {/* Time — hidden (em dash) when the engine reported no timing. */}
            <span className="font-mono text-[9px] text-brand-muted w-12 text-right shrink-0">
              {engine.processing_time_ms > 0 ? formatMs(engine.processing_time_ms) : '—'}
            </span>
          </motion.div>
        )
      })}
    </div>
  )
}
|
frontend/components/ui/liquid-glass-button.tsx
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { cn } from '@/lib/utils'
import type { ButtonHTMLAttributes } from 'react'

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
  variant?: 'primary' | 'ghost'
  size?: 'sm' | 'md'
}

// Class fragments keyed by prop value — identical strings to what the
// component renders, just hoisted out of the JSX.
const SIZE_CLASSES: Record<'sm' | 'md', string> = {
  md: 'text-[11px] px-6 py-2.5',
  sm: 'text-[10px] px-4 py-2',
}

const VARIANT_CLASSES: Record<'primary' | 'ghost', string> = {
  primary:
    'bg-white/[0.08] border-white/[0.2] text-brand-primary hover:bg-white/[0.12] hover:border-white/[0.3]',
  ghost:
    'bg-transparent border-white/[0.1] text-brand-secondary hover:border-white/[0.2] hover:text-brand-primary',
}

/**
 * Pill-shaped translucent "glass" button.
 * Forwards all native button props; `className` is merged last so
 * callers can override any default style.
 */
export default function LiquidGlassButton({
  children,
  variant = 'primary',
  size = 'md',
  className,
  ...rest
}: Props) {
  const mergedClasses = cn(
    'font-mono tracking-[0.06em] rounded-full border transition-all duration-200 backdrop-blur-sm',
    SIZE_CLASSES[size],
    VARIANT_CLASSES[variant],
    className
  )

  return (
    <button className={mergedClasses} {...rest}>
      {children}
    </button>
  )
}
|
frontend/components/ui/loading-orbit.tsx
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { useState, useEffect } from 'react'
import { motion, AnimatePresence } from 'framer-motion'
import { LOADING_STAGES } from '@/lib/constants'

// Indeterminate loading indicator: a spinning orbit ring plus a stage
// label that cycles through LOADING_STAGES on a fixed timer (the stages
// are cosmetic — they are not tied to actual backend progress).
export default function LoadingOrbit() {
  const [stageIndex, setStageIndex] = useState(0)

  // Advance the stage label every 2.8s, wrapping around; the interval is
  // cleared on unmount.
  useEffect(() => {
    const id = setInterval(() => {
      setStageIndex(i => (i + 1) % LOADING_STAGES.length)
    }, 2800)
    return () => clearInterval(id)
  }, [])

  return (
    <div className="flex flex-col items-center justify-center gap-10 py-24">
      {/* Spinning ring */}
      <div className="relative w-24 h-24 flex items-center justify-center">
        {/* Static inner circle */}
        <div className="absolute w-16 h-16 rounded-full bg-brand-surface border border-white/[0.09]" />
        {/* Outer orbit ring — only the top border is bright, so the
            continuous rotation reads as an orbiting highlight. */}
        <motion.div
          className="absolute w-24 h-24 rounded-full"
          style={{
            border: '1px solid transparent',
            borderTopColor: 'rgba(255,255,255,0.25)',
            borderRightColor: 'rgba(255,255,255,0.06)',
            borderBottomColor: 'rgba(255,255,255,0.06)',
            borderLeftColor: 'rgba(255,255,255,0.06)',
          }}
          animate={{ rotate: 360 }}
          transition={{ duration: 1.8, ease: 'linear', repeat: Infinity }}
        />
        {/* Center label */}
        <span className="font-mono text-[7px] tracking-[0.1em] text-brand-muted z-10 text-center leading-tight">
          SCANNING
        </span>
      </div>

      {/* Stage label — keyed on index so AnimatePresence cross-fades on change. */}
      <AnimatePresence mode="wait">
        <motion.p
          key={stageIndex}
          initial={{ opacity: 0, y: 5 }}
          animate={{ opacity: 1, y: 0 }}
          exit={{ opacity: 0, y: -5 }}
          transition={{ duration: 0.25 }}
          className="font-mono text-[10px] tracking-[0.22em] text-brand-muted uppercase"
        >
          {LOADING_STAGES[stageIndex]}
        </motion.p>
      </AnimatePresence>

      {/* Dots — one per stage; the active one is brighter and scaled up. */}
      <div className="flex gap-1.5">
        {LOADING_STAGES.map((_, i) => (
          <div
            key={i}
            className={`w-1 h-1 rounded-full transition-all duration-300 ${
              i === stageIndex ? 'bg-brand-secondary scale-125' : 'bg-brand-border'
            }`}
          />
        ))}
      </div>
    </div>
  )
}
|
frontend/components/ui/media-meta-strip.tsx
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { formatBytes } from '@/lib/utils'

interface Props {
  file: File
}

/**
 * Compact metadata strip for the selected file: name, uppercase
 * extension, and human-readable size.
 */
export default function MediaMetaStrip({ file }: Props) {
  // Last dot-separated segment of the name; a dotless name yields the
  // whole name, matching split('.').pop() semantics.
  const segments = file.name.split('.')
  const tail = segments.at(-1)
  const ext = tail !== undefined ? tail.toUpperCase() : '—'

  return (
    <div className="bg-brand-surface border border-brand-borderHi rounded-lg px-4 py-3 flex items-center gap-3">
      <span className="font-mono text-[9px] tracking-[0.12em] uppercase text-brand-muted w-14 shrink-0">
        File
      </span>
      <div className="flex-1 min-w-0">
        <p className="text-[12px] text-brand-primary truncate">{file.name}</p>
        <p className="font-mono text-[9px] text-brand-muted mt-0.5 tracking-[0.06em]">
          {ext} · {formatBytes(file.size)}
        </p>
      </div>
    </div>
  )
}
|
frontend/components/ui/radial-orbital-timeline.tsx
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import type { LucideIcon } from 'lucide-react'

export interface OrbitalNode {
  label: string
  Icon: LucideIcon
}

interface Props {
  nodes: OrbitalNode[]
  centerLabel?: string
  // Distance in px from the center to each node.
  radius?: number
  // Overall square footprint of the component in px.
  size?: number
}

// Polar placement: node 0 sits at 12 o'clock (angle π/2) and subsequent
// nodes proceed clockwise, evenly spaced. y is negated because screen
// coordinates grow downward.
function nodePosition(index: number, total: number, radius: number) {
  const angle = (Math.PI / 2) - (index * 2 * Math.PI) / total
  return {
    x: Math.round(radius * Math.cos(angle)),
    y: Math.round(-radius * Math.sin(angle)),
  }
}

// Static radial diagram: concentric orbit rings, a labeled center disc,
// and icon nodes positioned around it (decorative, non-interactive).
export default function RadialOrbitalTimeline({
  nodes,
  centerLabel = 'DETECT',
  radius = 140,
  size = 320,
}: Props) {
  return (
    <div
      className="relative flex items-center justify-center"
      style={{ width: size, height: size }}
    >
      {/* Orbit rings — two concentric circles sized relative to the footprint. */}
      {[size * 0.875, size * 0.625].map((d, i) => (
        <div
          key={d}
          className="absolute rounded-full border border-white/[0.07]"
          style={{ width: d, height: d }}
        />
      ))}

      {/* Center */}
      <div
        className="w-[72px] h-[72px] rounded-full flex flex-col items-center justify-center z-10 border border-white/10"
        style={{ background: 'linear-gradient(135deg, #595958, #262626)' }}
      >
        <span className="font-mono text-[6.5px] tracking-[0.08em] text-brand-secondary text-center leading-relaxed">
          GENAI<br />{centerLabel}
        </span>
      </div>

      {/* Nodes — translated from the container center by their polar offset. */}
      {nodes.map((node, i) => {
        const pos = nodePosition(i, nodes.length, radius)
        return (
          <div
            key={node.label}
            className="absolute flex flex-col items-center gap-1.5"
            style={{ transform: `translate(${pos.x}px, ${pos.y}px)` }}
          >
            <div className="w-9 h-9 rounded-full bg-brand-surface border border-white/[0.14] flex items-center justify-center">
              <node.Icon size={13} className="text-brand-secondary" />
            </div>
            <span className="font-mono text-[7px] tracking-[0.07em] text-brand-muted whitespace-nowrap">
              {node.label}
            </span>
          </div>
        )
      })}
    </div>
  )
}
|
frontend/components/ui/upload-zone.tsx
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import { useRef, useState, useCallback } from 'react'
import { Image as ImageIcon, Film } from 'lucide-react'
import { cn } from '@/lib/utils'
import {
  ALLOWED_IMAGE_TYPES, ALLOWED_VIDEO_TYPES,
  IMAGE_FORMAT_LABEL, VIDEO_FORMAT_LABEL,
  MAX_IMAGE_MB, MAX_VIDEO_MB,
} from '@/lib/constants'

interface Props {
  mediaType: 'image' | 'video'
  // Called only with files that pass type/size validation.
  onFile: (file: File) => void
}

/**
 * Drag-and-drop / click-to-browse upload zone.
 * Validates MIME type and size against the per-media limits before
 * forwarding the file via `onFile`; rejected files show an inline error.
 */
export default function UploadZone({ mediaType, onFile }: Props) {
  const inputRef = useRef<HTMLInputElement>(null)
  const [dragging, setDragging] = useState(false)
  const [error, setError] = useState<string | null>(null)

  const accept = mediaType === 'image' ? ALLOWED_IMAGE_TYPES.join(',') : ALLOWED_VIDEO_TYPES.join(',')
  const formatLabel = mediaType === 'image' ? IMAGE_FORMAT_LABEL : VIDEO_FORMAT_LABEL
  const maxMb = mediaType === 'image' ? MAX_IMAGE_MB : MAX_VIDEO_MB
  const Icon = mediaType === 'image' ? ImageIcon : Film

  // Returns an error message, or null when the file is acceptable.
  const validate = useCallback((file: File): string | null => {
    const allowed = mediaType === 'image' ? ALLOWED_IMAGE_TYPES : ALLOWED_VIDEO_TYPES
    // Some browsers report an empty MIME type for dragged files —
    // only reject when a type is present and not in the allow-list.
    if (file.type && !allowed.includes(file.type)) {
      return `Unsupported format. Allowed: ${formatLabel}`
    }
    if (file.size > maxMb * 1024 * 1024) {
      return `File too large. Max ${maxMb}MB.`
    }
    return null
  }, [mediaType, formatLabel, maxMb])

  // Validates the first file of a selection/drop and forwards it.
  const handleFiles = useCallback((files: FileList | null) => {
    if (!files || files.length === 0) return
    const file = files[0]
    const err = validate(file)
    if (err) { setError(err); return }
    setError(null)
    onFile(file)
  }, [validate, onFile])

  const onDrop = (e: React.DragEvent) => {
    e.preventDefault()
    setDragging(false)
    handleFiles(e.dataTransfer.files)
  }

  return (
    <div className="flex flex-col gap-2">
      <label
        className={cn(
          'relative border border-dashed rounded-[10px] flex flex-col items-center justify-center',
          'min-h-[280px] px-8 py-10 gap-2.5 cursor-pointer transition-colors duration-200',
          dragging
            ? 'border-white/25 bg-white/[0.025]'
            : 'border-white/[0.11] hover:border-white/[0.2]'
        )}
        onDragOver={e => { e.preventDefault(); setDragging(true) }}
        onDragLeave={() => setDragging(false)}
        onDrop={onDrop}
      >
        <input
          ref={inputRef}
          type="file"
          className="sr-only"
          accept={accept}
          onChange={e => {
            handleFiles(e.target.files)
            // FIX: clear the input so selecting the same file again still
            // fires `change` (browsers suppress it for an unchanged value).
            e.target.value = ''
          }}
        />

        {/* Icon container */}
        <div className="w-10 h-10 rounded-lg bg-white/[0.05] border border-white/[0.08] flex items-center justify-center mb-2">
          <Icon size={16} className="text-brand-muted" />
        </div>

        <h4 className="text-[13px] font-normal text-brand-primary">
          Drop {mediaType} here
        </h4>
        <p className="text-[11px] text-brand-muted text-center">
          or click to browse
        </p>
        <span className="font-mono text-[9px] text-brand-muted tracking-[0.1em] mt-1">
          {formatLabel}
        </span>
      </label>

      {error && (
        <p className="font-mono text-[9px] tracking-[0.08em] text-[#fca5a5] px-1">
          {error}
        </p>
      )}
    </div>
  )
}
|
frontend/components/ui/verdict-badge.tsx
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import type { Verdict } from '@/types/detection'
import { cn } from '@/lib/utils'

// Pill badge for a detection verdict: red styling for FAKE,
// green for everything else (REAL).
interface Props {
  verdict: Verdict
  size?: 'sm' | 'md'
}

export default function VerdictBadge({ verdict, size = 'sm' }: Props) {
  return (
    <span
      className={cn(
        'font-mono font-medium tracking-[0.1em] rounded-[20px] border inline-flex items-center',
        size === 'sm' ? 'text-[9px] px-2.5 py-1' : 'text-[11px] px-3.5 py-1.5',
        verdict === 'FAKE'
          ? 'bg-[rgba(239,68,68,0.15)] border-[rgba(239,68,68,0.3)] text-[#fca5a5]'
          : 'bg-[rgba(34,197,94,0.10)] border-[rgba(34,197,94,0.25)] text-[#86efac]'
      )}
    >
      {verdict}
    </span>
  )
}
|
frontend/lib/api.ts
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// frontend/lib/api.ts
|
| 2 |
+
|
| 3 |
+
import type { DetectionResponse } from '@/types/detection'
|
| 4 |
+
|
| 5 |
+
const BASE_URL =
|
| 6 |
+
process.env.NEXT_PUBLIC_API_URL ??
|
| 7 |
+
'http://localhost:8000'
|
| 8 |
+
|
| 9 |
+
async function _post(endpoint: string, file: File): Promise<DetectionResponse> {
|
| 10 |
+
const form = new FormData()
|
| 11 |
+
form.append('file', file)
|
| 12 |
+
|
| 13 |
+
const res = await fetch(`${BASE_URL}${endpoint}`, {
|
| 14 |
+
method: 'POST',
|
| 15 |
+
body: form,
|
| 16 |
+
})
|
| 17 |
+
|
| 18 |
+
if (!res.ok) {
|
| 19 |
+
const err = await res.text()
|
| 20 |
+
throw new Error(`Detection failed (${res.status}): ${err}`)
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
return res.json()
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
export const detectImage = (file: File) => _post('/detect/image', file)
|
| 27 |
+
export const detectVideo = (file: File) => _post('/detect/video', file)
|
| 28 |
+
|
| 29 |
+
export async function healthCheck(): Promise<{ status: string; version?: string }> {
|
| 30 |
+
const res = await fetch(`${BASE_URL}/health`)
|
| 31 |
+
if (!res.ok) throw new Error(`Health check failed: ${res.status}`)
|
| 32 |
+
return res.json()
|
| 33 |
+
}
|
frontend/lib/constants.ts
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// frontend/lib/constants.ts
// Shared UI constants: upload limits, label maps, and static layout data.

import type { GeneratorLabel } from '@/types/detection'

// Client-side upload size caps — presumably mirrored by server-side
// limits; confirm against the API before changing.
export const MAX_IMAGE_MB = 20
export const MAX_VIDEO_MB = 500

// MIME allow-lists used by the upload zone's validation.
export const ALLOWED_IMAGE_TYPES = ['image/jpeg', 'image/png', 'image/webp']
export const ALLOWED_VIDEO_TYPES = ['video/mp4', 'video/quicktime', 'video/x-msvideo']

// Human-readable hints shown under the upload zone.
export const IMAGE_FORMAT_LABEL = 'JPG - PNG - WEBP - MAX 20MB'
export const VIDEO_FORMAT_LABEL = 'MP4 - MOV - AVI - MAX 500MB'

// Display names for each generator classification label.
export const GENERATOR_LABELS: Record<GeneratorLabel, string> = {
  real: 'Real',
  unknown_gan: 'Unknown GAN',
  stable_diffusion: 'Stable Diffusion',
  midjourney: 'Midjourney',
  dall_e: 'DALL-E',
  flux: 'FLUX',
  firefly: 'Adobe Firefly',
  imagen: 'Google Imagen',
  sora: 'OpenAI Sora',
}

// Display names for each detection engine id.
export const ENGINE_LABELS: Record<string, string> = {
  fingerprint: 'FINGERPRINT',
  coherence: 'COHERENCE',
  sstgnn: 'SSTGNN',
}

// Node labels + lucide icon names for the radial orbital diagram.
export const ORBITAL_NODES = [
  { label: 'Image', icon: 'Image' },
  { label: 'Video', icon: 'Film' },
  { label: 'Audio', icon: 'Mic' },
  { label: 'Face Mesh', icon: 'Scan' },
  { label: 'Generator ID', icon: 'Cpu' },
  { label: 'Confidence', icon: 'BarChart2' },
  { label: 'Report', icon: 'FileText' },
] as const

// Cosmetic stage names cycled by the loading indicator.
export const LOADING_STAGES = [
  'FINGERPRINT SCAN',
  'COHERENCE CHECK',
  'GRAPH ANALYSIS',
] as const
|
frontend/lib/utils.ts
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// frontend/lib/utils.ts
|
| 2 |
+
|
| 3 |
+
import { type ClassValue, clsx } from 'clsx'
|
| 4 |
+
import { twMerge } from 'tailwind-merge'
|
| 5 |
+
|
| 6 |
+
export function cn(...inputs: ClassValue[]) {
|
| 7 |
+
return twMerge(clsx(inputs))
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
export function formatBytes(bytes: number): string {
|
| 11 |
+
if (bytes === 0) return '0 B'
|
| 12 |
+
const k = 1024
|
| 13 |
+
const sizes = ['B', 'KB', 'MB', 'GB']
|
| 14 |
+
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
| 15 |
+
return `${parseFloat((bytes / Math.pow(k, i)).toFixed(1))} ${sizes[i]}`
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
export function formatMs(ms: number): string {
|
| 19 |
+
if (ms < 1000) return `${Math.round(ms)}ms`
|
| 20 |
+
return `${(ms / 1000).toFixed(1)}s`
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
export function formatConfidence(confidence: number): string {
|
| 24 |
+
return `${(confidence * 100).toFixed(1)}%`
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
export function clampConfidence(v: number): number {
|
| 28 |
+
return Math.max(0, Math.min(1, v))
|
| 29 |
+
}
|
frontend/next-env.d.ts
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/// <reference types="next" />
|
| 2 |
+
/// <reference types="next/image-types/global" />
|
| 3 |
+
/// <reference path="./.next/types/routes.d.ts" />
|
| 4 |
+
|
| 5 |
+
// NOTE: This file should not be edited
|
| 6 |
+
// see https://nextjs.org/docs/pages/api-reference/config/typescript for more information.
|
frontend/next.config.js
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// next.config.js — Next.js configuration for the frontend package.
const path = require('path')

/** @type {import('next').NextConfig} */
const nextConfig = {
  reactStrictMode: true,
  // Restrict output file tracing to this directory (the frontend lives
  // inside a larger repo with its own package.json at the root).
  outputFileTracingRoot: path.join(__dirname),
  async rewrites() {
    // Proxy /api/* to the detection backend. NOTE(review): presumably this
    // lets the browser call the backend same-origin — confirm against how
    // the pages actually issue requests (lib/api.ts hits BASE_URL directly).
    return [
      {
        source: '/api/:path*',
        destination: `${process.env.NEXT_PUBLIC_API_URL ?? 'http://localhost:8000'}/:path*`,
      },
    ]
  },
}

module.exports = nextConfig
|
frontend/package-lock.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
frontend/package.json
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "genai-deepdetect-ui",
|
| 3 |
+
"version": "0.1.0",
|
| 4 |
+
"private": true,
|
| 5 |
+
"scripts": {
|
| 6 |
+
"dev": "next dev",
|
| 7 |
+
"build": "next build",
|
| 8 |
+
"start": "next start",
|
| 9 |
+
"lint": "next lint",
|
| 10 |
+
"type-check": "tsc --noEmit"
|
| 11 |
+
},
|
| 12 |
+
"dependencies": {
|
| 13 |
+
"class-variance-authority": "^0.7.0",
|
| 14 |
+
"clsx": "^2.1.0",
|
| 15 |
+
"framer-motion": "^12.38.0",
|
| 16 |
+
"lucide-react": "^0.323.0",
|
| 17 |
+
"next": "^15.5.14",
|
| 18 |
+
"react": "^18.2.0",
|
| 19 |
+
"react-dom": "^18.2.0",
|
| 20 |
+
"tailwind-merge": "^2.2.1"
|
| 22 |
+
},
|
| 23 |
+
"devDependencies": {
|
| 24 |
+
"@types/node": "^20.11.5",
|
| 25 |
+
"@types/react": "^18.2.48",
|
| 26 |
+
"@types/react-dom": "^18.2.18",
|
| 27 |
+
"autoprefixer": "^10.4.17",
|
| 28 |
+
"eslint": "^8.56.0",
|
| 29 |
+
"eslint-config-next": "^15.5.14",
|
| 30 |
+
"postcss": "^8.4.33",
|
| 31 |
+
"tailwindcss": "^3.4.1",
|
| 32 |
+
"typescript": "^5.3.3"
|
| 33 |
+
}
|
| 34 |
+
}
|
frontend/pages/_app.tsx
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// pages/_app.tsx — global App wrapper: shared <Head> tags plus global styles.
import type { AppProps } from 'next/app'
import Head from 'next/head'
import '@/styles/globals.css'

export default function App({ Component, pageProps }: AppProps) {
  return (
    <>
      <Head>
        {/* Tags here apply to every page; per-page <title> is set in each page. */}
        <meta name="viewport" content="width=device-width, initial-scale=1" />
        <meta name="description" content="Multimodal AI-generated content detection — three independent engines, one verdict." />
        <link rel="icon" href="/favicon.ico" />
        {/* NOTE(review): these preconnect hints also appear in _document.tsx —
            confirm the duplication is intentional. */}
        <link rel="preconnect" href="https://fonts.googleapis.com" />
        <link rel="preconnect" href="https://fonts.gstatic.com" crossOrigin="anonymous" />
      </Head>
      <Component {...pageProps} />
    </>
  )
}
|
frontend/pages/_document.tsx
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// pages/_document.tsx — custom HTML document: <html lang> and web-font loading.
import { Html, Head, Main, NextScript } from 'next/document'

export default function Document() {
  return (
    <Html lang="en">
      <Head>
        {/* Warm up connections to Google Fonts before the stylesheet request. */}
        <link rel="preconnect" href="https://fonts.googleapis.com" />
        <link rel="preconnect" href="https://fonts.gstatic.com" crossOrigin="anonymous" />
        {/* DM Sans (body copy) and DM Mono (labels) in the weights the UI uses. */}
        <link
          href="https://fonts.googleapis.com/css2?family=DM+Mono:ital,wght@0,300;0,400;0,500;1,300&family=DM+Sans:ital,opsz,wght@0,9..40,300;0,9..40,400;0,9..40,500;0,9..40,600;1,9..40,300&display=swap"
          rel="stylesheet"
        />
      </Head>
      <body>
        <Main />
        <NextScript />
      </body>
    </Html>
  )
}
|
frontend/pages/analyze.tsx
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { NextPage } from 'next'
|
| 2 |
+
import Head from 'next/head'
|
| 3 |
+
import { useRouter } from 'next/router'
|
| 4 |
+
import { useState, useCallback, useRef } from 'react'
|
| 5 |
+
import { motion, AnimatePresence } from 'framer-motion'
|
| 6 |
+
import { ArrowLeft, Image as ImageIcon, Film } from 'lucide-react'
|
| 7 |
+
import type { AnalysisState, DetectionResponse } from '@/types/detection'
|
| 8 |
+
import { detectImage, detectVideo } from '@/lib/api'
|
| 9 |
+
import { formatBytes } from '@/lib/utils'
|
| 10 |
+
import UploadZone from '@/components/ui/upload-zone'
|
| 11 |
+
import LoadingOrbit from '@/components/ui/loading-orbit'
|
| 12 |
+
import VerdictBadge from '@/components/ui/verdict-badge'
|
| 13 |
+
import EngineBreakdown from '@/components/ui/engine-breakdown'
|
| 14 |
+
import MediaMetaStrip from '@/components/ui/media-meta-strip'
|
| 15 |
+
import ConfidenceRing from '@/components/ui/confidence-ring'
|
| 16 |
+
|
| 17 |
+
const FADE_UP = {
|
| 18 |
+
initial: { opacity: 0, y: 14 },
|
| 19 |
+
animate: { opacity: 1, y: 0 },
|
| 20 |
+
transition: { duration: 0.4, ease: [0.25, 0.46, 0.45, 0.94] },
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
export default function Analyze(): JSX.Element {
|
| 24 |
+
const router = useRouter()
|
| 25 |
+
const defaultType = router.query.type === 'video' ? 'video' : 'image'
|
| 26 |
+
const [mediaType, setMediaType] = useState<'image' | 'video'>(defaultType)
|
| 27 |
+
const [state, setState] = useState<AnalysisState>({ status: 'idle' })
|
| 28 |
+
|
| 29 |
+
const handleFile = useCallback(async (file: File) => {
|
| 30 |
+
const preview = URL.createObjectURL(file)
|
| 31 |
+
setState({ status: 'uploading', file, preview })
|
| 32 |
+
|
| 33 |
+
try {
|
| 34 |
+
setState(s => ({ ...s, status: 'analysing' }))
|
| 35 |
+
const result = mediaType === 'image'
|
| 36 |
+
? await detectImage(file)
|
| 37 |
+
: await detectVideo(file)
|
| 38 |
+
setState({ status: 'done', file, preview, result })
|
| 39 |
+
} catch (err) {
|
| 40 |
+
setState(s => ({
|
| 41 |
+
...s,
|
| 42 |
+
status: 'error',
|
| 43 |
+
error: err instanceof Error ? err.message : 'Unknown error',
|
| 44 |
+
}))
|
| 45 |
+
}
|
| 46 |
+
}, [mediaType])
|
| 47 |
+
|
| 48 |
+
const reset = () => {
|
| 49 |
+
if (state.preview) URL.revokeObjectURL(state.preview)
|
| 50 |
+
setState({ status: 'idle' })
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
return (
|
| 54 |
+
<>
|
| 55 |
+
<Head>
|
| 56 |
+
<title>Analyse · GenAI-DeepDetect</title>
|
| 57 |
+
</Head>
|
| 58 |
+
|
| 59 |
+
<main className="min-h-screen bg-brand-bg flex flex-col">
|
| 60 |
+
{/* Nav */}
|
| 61 |
+
<nav className="flex items-center gap-4 px-8 py-5 border-b border-brand-border">
|
| 62 |
+
<button
|
| 63 |
+
onClick={() => router.push('/')}
|
| 64 |
+
className="flex items-center gap-2 font-mono text-[10px] tracking-[0.12em] uppercase text-brand-muted hover:text-brand-secondary transition-colors"
|
| 65 |
+
>
|
| 66 |
+
<ArrowLeft size={12} />
|
| 67 |
+
Back
|
| 68 |
+
</button>
|
| 69 |
+
<span className="font-mono text-[10px] tracking-[0.2em] uppercase text-brand-muted">
|
| 70 |
+
/analyze
|
| 71 |
+
</span>
|
| 72 |
+
|
| 73 |
+
{/* Media type toggle */}
|
| 74 |
+
<div className="ml-auto flex items-center gap-1 bg-brand-surface border border-brand-borderHi rounded-full p-0.5">
|
| 75 |
+
{(['image', 'video'] as const).map(t => (
|
| 76 |
+
<button
|
| 77 |
+
key={t}
|
| 78 |
+
onClick={() => { setMediaType(t); reset() }}
|
| 79 |
+
className={`flex items-center gap-1.5 font-mono text-[9px] tracking-[0.08em] uppercase px-3.5 py-1.5 rounded-full transition-all duration-150 ${
|
| 80 |
+
mediaType === t
|
| 81 |
+
? 'bg-white/10 text-brand-primary'
|
| 82 |
+
: 'text-brand-muted hover:text-brand-secondary'
|
| 83 |
+
}`}
|
| 84 |
+
>
|
| 85 |
+
{t === 'image' ? <ImageIcon size={10} /> : <Film size={10} />}
|
| 86 |
+
{t}
|
| 87 |
+
</button>
|
| 88 |
+
))}
|
| 89 |
+
</div>
|
| 90 |
+
</nav>
|
| 91 |
+
|
| 92 |
+
<div className="flex-1 max-w-[900px] w-full mx-auto px-8 py-10">
|
| 93 |
+
|
| 94 |
+
{/* IDLE — upload zone */}
|
| 95 |
+
{state.status === 'idle' && (
|
| 96 |
+
<motion.div {...FADE_UP} className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
| 97 |
+
<UploadZone
|
| 98 |
+
mediaType={mediaType}
|
| 99 |
+
onFile={handleFile}
|
| 100 |
+
/>
|
| 101 |
+
{/* Info panel */}
|
| 102 |
+
<div className="flex flex-col gap-3">
|
| 103 |
+
{['FINGERPRINT', 'COHERENCE', 'SSTGNN'].map((engine, i) => (
|
| 104 |
+
<motion.div
|
| 105 |
+
key={engine}
|
| 106 |
+
initial={{ opacity: 0, x: 8 }}
|
| 107 |
+
animate={{ opacity: 1, x: 0 }}
|
| 108 |
+
transition={{ delay: i * 0.07, duration: 0.35 }}
|
| 109 |
+
className="bg-brand-surface border border-brand-borderHi rounded-lg px-4 py-3"
|
| 110 |
+
>
|
| 111 |
+
<div className="font-mono text-[9px] tracking-[0.15em] text-brand-muted mb-1">
|
| 112 |
+
{engine}
|
| 113 |
+
</div>
|
| 114 |
+
<div className="text-[12px] text-brand-secondary font-light">
|
| 115 |
+
{engine === 'FINGERPRINT' && 'Frequency-domain artifact analysis via ViT-B/16'}
|
| 116 |
+
{engine === 'COHERENCE' && 'Lip-audio synchronisation scoring (video only)'}
|
| 117 |
+
{engine === 'SSTGNN' && 'Facial landmark trajectory graph attention (video only)'}
|
| 118 |
+
</div>
|
| 119 |
+
</motion.div>
|
| 120 |
+
))}
|
| 121 |
+
</div>
|
| 122 |
+
</motion.div>
|
| 123 |
+
)}
|
| 124 |
+
|
| 125 |
+
{/* UPLOADING / ANALYSING — loading orbit */}
|
| 126 |
+
{(state.status === 'uploading' || state.status === 'analysing') && (
|
| 127 |
+
<LoadingOrbit />
|
| 128 |
+
)}
|
| 129 |
+
|
| 130 |
+
{/* ERROR */}
|
| 131 |
+
{state.status === 'error' && (
|
| 132 |
+
<motion.div {...FADE_UP} className="flex flex-col items-center gap-6 py-20">
|
| 133 |
+
<div className="font-mono text-[10px] tracking-[0.15em] uppercase text-brand-muted">
|
| 134 |
+
Error
|
| 135 |
+
</div>
|
| 136 |
+
<p className="text-brand-secondary text-center max-w-sm">{state.error}</p>
|
| 137 |
+
<button
|
| 138 |
+
onClick={reset}
|
| 139 |
+
className="font-mono text-[10px] tracking-[0.1em] uppercase px-5 py-2 border border-brand-borderHi rounded-full text-brand-secondary hover:border-brand-muted transition-colors"
|
| 140 |
+
>
|
| 141 |
+
Try Again
|
| 142 |
+
</button>
|
| 143 |
+
</motion.div>
|
| 144 |
+
)}
|
| 145 |
+
|
| 146 |
+
{/* DONE — results */}
|
| 147 |
+
{state.status === 'done' && state.result && (
|
| 148 |
+
<div className="flex flex-col gap-6">
|
| 149 |
+
{/* Header row */}
|
| 150 |
+
<motion.div {...FADE_UP} className="flex items-center justify-between">
|
| 151 |
+
<div className="section-label">Results</div>
|
| 152 |
+
<button
|
| 153 |
+
onClick={reset}
|
| 154 |
+
className="font-mono text-[9px] tracking-[0.1em] uppercase text-brand-muted hover:text-brand-secondary border border-brand-borderHi rounded-full px-3.5 py-1.5 transition-colors"
|
| 155 |
+
>
|
| 156 |
+
New analysis
|
| 157 |
+
</button>
|
| 158 |
+
</motion.div>
|
| 159 |
+
|
| 160 |
+
{/* Preview + stats */}
|
| 161 |
+
<motion.div
|
| 162 |
+
{...FADE_UP}
|
| 163 |
+
transition={{ ...FADE_UP.transition, delay: 0.05 }}
|
| 164 |
+
className="grid grid-cols-1 md:grid-cols-2 gap-4"
|
| 165 |
+
>
|
| 166 |
+
{/* Preview */}
|
| 167 |
+
<div className="relative bg-brand-surface border border-brand-borderHi rounded-[14px] overflow-hidden aspect-[4/3] flex items-center justify-center">
|
| 168 |
+
{state.preview && (
|
| 169 |
+
<img
|
| 170 |
+
src={state.preview}
|
| 171 |
+
alt="Uploaded media"
|
| 172 |
+
className="w-full h-full object-cover"
|
| 173 |
+
/>
|
| 174 |
+
)}
|
| 175 |
+
{!state.preview && (
|
| 176 |
+
<span className="font-mono text-[10px] text-brand-muted tracking-[0.1em]">
|
| 177 |
+
PREVIEW
|
| 178 |
+
</span>
|
| 179 |
+
)}
|
| 180 |
+
<div className="absolute top-2.5 right-2.5">
|
| 181 |
+
<VerdictBadge verdict={state.result.verdict} />
|
| 182 |
+
</div>
|
| 183 |
+
</div>
|
| 184 |
+
|
| 185 |
+
{/* Stats */}
|
| 186 |
+
<div className="flex flex-col gap-3">
|
| 187 |
+
<ConfidenceRing
|
| 188 |
+
confidence={state.result.confidence}
|
| 189 |
+
verdict={state.result.verdict}
|
| 190 |
+
/>
|
| 191 |
+
<StatPanel
|
| 192 |
+
label="Processing time"
|
| 193 |
+
value={`${(state.result.processing_time_ms / 1000).toFixed(2)}s`}
|
| 194 |
+
/>
|
| 195 |
+
<StatPanel
|
| 196 |
+
label="Generator"
|
| 197 |
+
value={state.result.attributed_generator.replace(/_/g, ' ')}
|
| 198 |
+
mono
|
| 199 |
+
/>
|
| 200 |
+
{state.file && (
|
| 201 |
+
<MediaMetaStrip file={state.file} />
|
| 202 |
+
)}
|
| 203 |
+
</div>
|
| 204 |
+
</motion.div>
|
| 205 |
+
|
| 206 |
+
{/* Engine breakdown */}
|
| 207 |
+
<motion.div
|
| 208 |
+
{...FADE_UP}
|
| 209 |
+
transition={{ ...FADE_UP.transition, delay: 0.1 }}
|
| 210 |
+
>
|
| 211 |
+
<div className="section-label mb-4">Engine breakdown</div>
|
| 212 |
+
<EngineBreakdown engines={state.result.engine_breakdown} />
|
| 213 |
+
</motion.div>
|
| 214 |
+
|
| 215 |
+
{/* Explanation */}
|
| 216 |
+
<motion.div
|
| 217 |
+
{...FADE_UP}
|
| 218 |
+
transition={{ ...FADE_UP.transition, delay: 0.15 }}
|
| 219 |
+
className="bg-brand-surface border border-brand-borderHi rounded-lg px-5 py-4"
|
| 220 |
+
>
|
| 221 |
+
<div className="font-mono text-[9px] tracking-[0.15em] uppercase text-brand-muted mb-2.5">
|
| 222 |
+
Analysis detail
|
| 223 |
+
</div>
|
| 224 |
+
<p className="text-[13px] text-brand-secondary font-light leading-relaxed">
|
| 225 |
+
{state.result.explanation}
|
| 226 |
+
</p>
|
| 227 |
+
</motion.div>
|
| 228 |
+
</div>
|
| 229 |
+
)}
|
| 230 |
+
</div>
|
| 231 |
+
</main>
|
| 232 |
+
</>
|
| 233 |
+
)
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
function StatPanel({
|
| 237 |
+
label, value, mono = false,
|
| 238 |
+
}: { label: string; value: string; mono?: boolean }) {
|
| 239 |
+
return (
|
| 240 |
+
<div className="bg-brand-surface border border-brand-borderHi rounded-lg px-4 py-3">
|
| 241 |
+
<div className="font-mono text-[9px] tracking-[0.12em] uppercase text-brand-muted mb-1.5">
|
| 242 |
+
{label}
|
| 243 |
+
</div>
|
| 244 |
+
<div className={`text-[20px] font-light tracking-[-0.02em] text-brand-primary ${mono ? 'font-mono text-[14px]' : ''}`}>
|
| 245 |
+
{value}
|
| 246 |
+
</div>
|
| 247 |
+
</div>
|
| 248 |
+
)
|
| 249 |
+
}
|
frontend/pages/index.tsx
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { NextPage } from 'next';
|
| 2 |
+
import Head from 'next/head';
|
| 3 |
+
import { useRouter } from 'next/router';
|
| 4 |
+
import { motion } from 'framer-motion';
|
| 5 |
+
import {
|
| 6 |
+
Image as ImageIcon,
|
| 7 |
+
Film,
|
| 8 |
+
Mic,
|
| 9 |
+
Scan,
|
| 10 |
+
Cpu,
|
| 11 |
+
BarChart2,
|
| 12 |
+
FileText,
|
| 13 |
+
} from 'lucide-react';
|
| 14 |
+
import type { LucideIcon } from 'lucide-react';
|
| 15 |
+
import { ORBITAL_NODES } from '@/lib/constants';
|
| 16 |
+
|
| 17 |
+
const ICONS: Record<string, LucideIcon> = {
|
| 18 |
+
Image: ImageIcon,
|
| 19 |
+
Film,
|
| 20 |
+
Mic,
|
| 21 |
+
Scan,
|
| 22 |
+
Cpu,
|
| 23 |
+
BarChart2,
|
| 24 |
+
FileText,
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
// Node positions at radius=140 — 7 nodes, 360/7 ≈ 51.4° apart, starting at top (90°)
|
| 28 |
+
function nodePosition(index: number, radius = 140) {
|
| 29 |
+
const angle = Math.PI / 2 - (index * 2 * Math.PI) / 7;
|
| 30 |
+
return {
|
| 31 |
+
x: Math.round(radius * Math.cos(angle)),
|
| 32 |
+
y: Math.round(-radius * Math.sin(angle)), // flip Y for CSS
|
| 33 |
+
};
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
export default function Landing(): JSX.Element {
|
| 37 |
+
const router = useRouter();
|
| 38 |
+
|
| 39 |
+
return (
|
| 40 |
+
<>
|
| 41 |
+
<Head>
|
| 42 |
+
<title>GenAI-DeepDetect — Forensic Detection System</title>
|
| 43 |
+
</Head>
|
| 44 |
+
|
| 45 |
+
<main className="min-h-screen bg-brand-bg flex flex-col">
|
| 46 |
+
{/* Nav */}
|
| 47 |
+
<nav className="flex items-center justify-between px-10 py-6 border-b border-brand-border">
|
| 48 |
+
<span className="font-mono text-[11px] tracking-[0.2em] uppercase text-brand-muted">
|
| 49 |
+
GenAI-DeepDetect
|
| 50 |
+
</span>
|
| 51 |
+
<a
|
| 52 |
+
href="/docs"
|
| 53 |
+
className="font-mono text-[10px] tracking-[0.1em] text-brand-muted border border-brand-borderHi rounded-full px-4 py-1.5 hover:border-brand-muted transition-colors">
|
| 54 |
+
API Docs
|
| 55 |
+
</a>
|
| 56 |
+
</nav>
|
| 57 |
+
|
| 58 |
+
{/* Hero */}
|
| 59 |
+
<section className="flex-1 flex flex-col items-center justify-center px-8 py-20 relative overflow-hidden">
|
| 60 |
+
{/* Background paths */}
|
| 61 |
+
<div
|
| 62 |
+
className="absolute inset-0 opacity-[0.06] pointer-events-none"
|
| 63 |
+
aria-hidden>
|
| 64 |
+
<svg
|
| 65 |
+
className="w-full h-full"
|
| 66 |
+
xmlns="http://www.w3.org/2000/svg">
|
| 67 |
+
<defs>
|
| 68 |
+
<pattern
|
| 69 |
+
id="grid"
|
| 70 |
+
width="48"
|
| 71 |
+
height="48"
|
| 72 |
+
patternUnits="userSpaceOnUse">
|
| 73 |
+
<path
|
| 74 |
+
d="M 48 0 L 0 0 0 48"
|
| 75 |
+
fill="none"
|
| 76 |
+
stroke="#f2f2f2"
|
| 77 |
+
strokeWidth="0.5"
|
| 78 |
+
/>
|
| 79 |
+
</pattern>
|
| 80 |
+
</defs>
|
| 81 |
+
<rect
|
| 82 |
+
width="100%"
|
| 83 |
+
height="100%"
|
| 84 |
+
fill="url(#grid)"
|
| 85 |
+
/>
|
| 86 |
+
</svg>
|
| 87 |
+
</div>
|
| 88 |
+
|
| 89 |
+
{/* Eyebrow */}
|
| 90 |
+
<motion.div
|
| 91 |
+
initial={{ opacity: 0, y: 8 }}
|
| 92 |
+
animate={{ opacity: 1, y: 0 }}
|
| 93 |
+
transition={{ duration: 0.5 }}
|
| 94 |
+
className="font-mono text-[11px] tracking-[0.2em] uppercase text-brand-muted mb-12">
|
| 95 |
+
Forensic Detection System
|
| 96 |
+
</motion.div>
|
| 97 |
+
|
| 98 |
+
{/* Orbital diagram */}
|
| 99 |
+
<motion.div
|
| 100 |
+
initial={{ opacity: 0, scale: 0.96 }}
|
| 101 |
+
animate={{ opacity: 1, scale: 1 }}
|
| 102 |
+
transition={{ duration: 0.6, delay: 0.1 }}
|
| 103 |
+
className="relative w-[320px] h-[320px] flex items-center justify-center mb-14">
|
| 104 |
+
{/* Orbit rings */}
|
| 105 |
+
{[280, 200].map((size, i) => (
|
| 106 |
+
<div
|
| 107 |
+
key={size}
|
| 108 |
+
className="absolute rounded-full border border-white/[0.07]"
|
| 109 |
+
style={{ width: size, height: size }}
|
| 110 |
+
/>
|
| 111 |
+
))}
|
| 112 |
+
|
| 113 |
+
{/* Center node */}
|
| 114 |
+
<div
|
| 115 |
+
className="w-[72px] h-[72px] rounded-full flex flex-col items-center justify-center z-10 border border-white/10"
|
| 116 |
+
style={{
|
| 117 |
+
background:
|
| 118 |
+
'linear-gradient(135deg, #595958, #262626)',
|
| 119 |
+
}}>
|
| 120 |
+
<span className="font-mono text-[6.5px] tracking-[0.08em] text-brand-secondary text-center leading-relaxed">
|
| 121 |
+
GENAI
|
| 122 |
+
<br />
|
| 123 |
+
DETECT
|
| 124 |
+
</span>
|
| 125 |
+
</div>
|
| 126 |
+
|
| 127 |
+
{/* Orbital nodes */}
|
| 128 |
+
{ORBITAL_NODES.map((node, i) => {
|
| 129 |
+
const pos = nodePosition(i);
|
| 130 |
+
const Icon = ICONS[node.icon];
|
| 131 |
+
return (
|
| 132 |
+
<div
|
| 133 |
+
key={node.label}
|
| 134 |
+
className="absolute flex flex-col items-center gap-1.5"
|
| 135 |
+
style={{
|
| 136 |
+
transform: `translate(${pos.x}px, ${pos.y}px)`,
|
| 137 |
+
}}>
|
| 138 |
+
<div className="w-9 h-9 rounded-full bg-brand-surface border border-white/[0.14] flex items-center justify-center">
|
| 139 |
+
{Icon && (
|
| 140 |
+
<Icon
|
| 141 |
+
size={13}
|
| 142 |
+
className="text-brand-secondary"
|
| 143 |
+
/>
|
| 144 |
+
)}
|
| 145 |
+
</div>
|
| 146 |
+
<span className="font-mono text-[7px] tracking-[0.07em] text-brand-muted whitespace-nowrap">
|
| 147 |
+
{node.label}
|
| 148 |
+
</span>
|
| 149 |
+
</div>
|
| 150 |
+
);
|
| 151 |
+
})}
|
| 152 |
+
</motion.div>
|
| 153 |
+
|
| 154 |
+
{/* Headline */}
|
| 155 |
+
<motion.h1
|
| 156 |
+
initial={{ opacity: 0, y: 12 }}
|
| 157 |
+
animate={{ opacity: 1, y: 0 }}
|
| 158 |
+
transition={{ duration: 0.5, delay: 0.2 }}
|
| 159 |
+
className="text-[clamp(36px,5vw,64px)] font-light tracking-[-0.03em] leading-[1.05] text-center text-brand-primary mb-5">
|
| 160 |
+
Detect AI-Generated
|
| 161 |
+
<br />
|
| 162 |
+
<span className="text-brand-secondary">Content</span>
|
| 163 |
+
</motion.h1>
|
| 164 |
+
|
| 165 |
+
<motion.p
|
| 166 |
+
initial={{ opacity: 0, y: 8 }}
|
| 167 |
+
animate={{ opacity: 1, y: 0 }}
|
| 168 |
+
transition={{ duration: 0.5, delay: 0.3 }}
|
| 169 |
+
className="text-[15px] font-light text-brand-secondary text-center max-w-[460px] leading-relaxed mb-10">
|
| 170 |
+
Three independent engines analyse your media for
|
| 171 |
+
synthetic fingerprints, temporal inconsistencies, and
|
| 172 |
+
facial landmark anomalies.
|
| 173 |
+
</motion.p>
|
| 174 |
+
|
| 175 |
+
{/* CTAs */}
|
| 176 |
+
<motion.div
|
| 177 |
+
initial={{ opacity: 0, y: 8 }}
|
| 178 |
+
animate={{ opacity: 1, y: 0 }}
|
| 179 |
+
transition={{ duration: 0.5, delay: 0.4 }}
|
| 180 |
+
className="flex gap-3">
|
| 181 |
+
<button
|
| 182 |
+
onClick={() => router.push('/analyze?type=image')}
|
| 183 |
+
className="font-mono text-[11px] tracking-[0.06em] px-6 py-2.5 rounded-full border border-white/20 bg-white/[0.08] text-brand-primary hover:bg-white/[0.12] hover:border-white/30 transition-all duration-200 backdrop-blur-sm">
|
| 184 |
+
Upload Image
|
| 185 |
+
</button>
|
| 186 |
+
<button
|
| 187 |
+
onClick={() => router.push('/analyze?type=video')}
|
| 188 |
+
className="font-mono text-[11px] tracking-[0.06em] px-6 py-2.5 rounded-full border border-white/10 text-brand-secondary hover:border-white/20 hover:text-brand-primary transition-all duration-200">
|
| 189 |
+
Upload Video
|
| 190 |
+
</button>
|
| 191 |
+
</motion.div>
|
| 192 |
+
</section>
|
| 193 |
+
|
| 194 |
+
{/* Footer */}
|
| 195 |
+
<footer className="border-t border-brand-border px-10 py-5 flex items-center justify-between">
|
| 196 |
+
<span className="font-mono text-[9px] tracking-[0.15em] text-brand-border text-[#333]">
|
| 197 |
+
GENAI-DEEPDETECT · v1.0.0
|
| 198 |
+
</span>
|
| 199 |
+
<span className="font-mono text-[9px] text-[#333]">
|
| 200 |
+
Fingerprint · Coherence · SSTGNN
|
| 201 |
+
</span>
|
| 202 |
+
</footer>
|
| 203 |
+
</main>
|
| 204 |
+
</>
|
| 205 |
+
);
|
| 206 |
+
}
|
frontend/postcss.config.js
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// postcss.config.js — PostCSS pipeline: expand Tailwind directives, then
// add vendor prefixes with Autoprefixer.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}
|
frontend/styles/globals.css
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* globals.css — design tokens, base element styles, and small shared utilities. */

/* NOTE(review): this font stylesheet is also linked from _document.tsx —
   confirm the double load is intentional. */
@import url('https://fonts.googleapis.com/css2?family=DM+Mono:ital,wght@0,300;0,400;0,500;1,300&family=DM+Sans:ital,opsz,wght@0,9..40,300;0,9..40,400;0,9..40,500;0,9..40,600;1,9..40,300&display=swap');

@tailwind base;
@tailwind components;
@tailwind utilities;

/* Design tokens — mirrored by the `brand` palette in tailwind.config.ts. */
:root {
  --c-bg: #0D0D0D;
  --c-surface: #262626;
  --c-border: #1A1A1A;
  --c-border-hi: #2A2A2A;
  --c-muted: #595958;
  --c-secondary: #A6A6A6;
  --c-primary: #F2F2F2;
  --font-sans: 'DM Sans', sans-serif;
  --font-mono: 'DM Mono', monospace;

  /* Semantic verdict colors */
  --c-fake-bg: rgba(239, 68, 68, 0.15);
  --c-fake-bd: rgba(239, 68, 68, 0.30);
  --c-fake-tx: #fca5a5;
  --c-real-bg: rgba(34, 197, 94, 0.10);
  --c-real-bd: rgba(34, 197, 94, 0.25);
  --c-real-tx: #86efac;
}

* {
  box-sizing: border-box;
  margin: 0;
  padding: 0;
}

html,
body {
  background: var(--c-bg);
  color: var(--c-primary);
  font-family: var(--font-sans);
  font-size: 15px;
  line-height: 1.7;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Scrollbar */
::-webkit-scrollbar { width: 4px; }
::-webkit-scrollbar-track { background: var(--c-bg); }
::-webkit-scrollbar-thumb { background: var(--c-border-hi); border-radius: 2px; }

/* Focus visible */
:focus-visible {
  outline: 1px solid var(--c-muted);
  outline-offset: 2px;
}

/* Mono utility */
.font-mono { font-family: var(--font-mono); }

/* Section label with trailing line */
.section-label {
  display: flex;
  align-items: center;
  gap: 12px;
  font-family: var(--font-mono);
  font-size: 10px;
  letter-spacing: 0.2em;
  text-transform: uppercase;
  color: var(--c-muted);
}
/* The ::after pseudo-element draws the horizontal rule after the label text. */
.section-label::after {
  content: '';
  flex: 1;
  height: 1px;
  background: #222;
}

/* Card hover lift */
.card-interactive {
  transition: transform 200ms ease, border-color 200ms ease;
}
.card-interactive:hover {
  transform: translateY(-1px);
  border-color: rgba(255, 255, 255, 0.15);
}
|
frontend/tailwind.config.ts
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import type { Config } from 'tailwindcss'
|
| 2 |
+
|
| 3 |
+
const config: Config = {
|
| 4 |
+
content: [
|
| 5 |
+
'./pages/**/*.{js,ts,jsx,tsx,mdx}',
|
| 6 |
+
'./components/**/*.{js,ts,jsx,tsx,mdx}',
|
| 7 |
+
],
|
| 8 |
+
theme: {
|
| 9 |
+
extend: {
|
| 10 |
+
colors: {
|
| 11 |
+
brand: {
|
| 12 |
+
bg: '#0D0D0D',
|
| 13 |
+
surface: '#262626',
|
| 14 |
+
border: '#1A1A1A',
|
| 15 |
+
borderHi: '#2A2A2A',
|
| 16 |
+
muted: '#595958',
|
| 17 |
+
secondary: '#A6A6A6',
|
| 18 |
+
primary: '#F2F2F2',
|
| 19 |
+
},
|
| 20 |
+
},
|
| 21 |
+
fontFamily: {
|
| 22 |
+
sans: ['DM Sans', 'sans-serif'],
|
| 23 |
+
mono: ['DM Mono', 'monospace'],
|
| 24 |
+
},
|
| 25 |
+
borderRadius: {
|
| 26 |
+
card: '14px',
|
| 27 |
+
panel: '8px',
|
| 28 |
+
zone: '10px',
|
| 29 |
+
badge: '20px',
|
| 30 |
+
},
|
| 31 |
+
animation: {
|
| 32 |
+
'spin-slow': 'spin 2s linear infinite',
|
| 33 |
+
'fade-up': 'fadeUp 0.4s ease-out forwards',
|
| 34 |
+
'pulse-ring':'pulseRing 2s ease-in-out infinite',
|
| 35 |
+
},
|
| 36 |
+
keyframes: {
|
| 37 |
+
fadeUp: {
|
| 38 |
+
'0%': { opacity: '0', transform: 'translateY(12px)' },
|
| 39 |
+
'100%': { opacity: '1', transform: 'translateY(0)' },
|
| 40 |
+
},
|
| 41 |
+
pulseRing: {
|
| 42 |
+
'0%, 100%': { opacity: '0.4', transform: 'scale(1)' },
|
| 43 |
+
'50%': { opacity: '0.8', transform: 'scale(1.04)' },
|
| 44 |
+
},
|
| 45 |
+
},
|
| 46 |
+
},
|
| 47 |
+
},
|
| 48 |
+
plugins: [],
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
export default config
|
frontend/tsconfig.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"compilerOptions": {
|
| 3 |
+
"target": "es5",
|
| 4 |
+
"lib": ["dom", "dom.iterable", "esnext"],
|
| 5 |
+
"allowJs": true,
|
| 6 |
+
"skipLibCheck": true,
|
| 7 |
+
"strict": true,
|
| 8 |
+
"noEmit": true,
|
| 9 |
+
"esModuleInterop": true,
|
| 10 |
+
"module": "esnext",
|
| 11 |
+
"moduleResolution": "bundler",
|
| 12 |
+
"resolveJsonModule": true,
|
| 13 |
+
"isolatedModules": true,
|
| 14 |
+
"jsx": "preserve",
|
| 15 |
+
"incremental": true,
|
| 16 |
+
"paths": {
|
| 17 |
+
"@/components/*": ["./components/*"],
|
| 18 |
+
"@/lib/*": ["./lib/*"],
|
| 19 |
+
"@/styles/*": ["./styles/*"],
|
| 20 |
+
"@/types/*": ["./types/*"]
|
| 21 |
+
}
|
| 22 |
+
},
|
| 23 |
+
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
|
| 24 |
+
"exclude": ["node_modules"]
|
| 25 |
+
}
|
frontend/types/detection.ts
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// frontend/types/detection.ts
|
| 2 |
+
// Types aligned with src/types.py.
|
| 3 |
+
|
| 4 |
+
export type Verdict = 'FAKE' | 'REAL' | 'UNKNOWN'
|
| 5 |
+
|
| 6 |
+
export type GeneratorLabel =
|
| 7 |
+
| 'real'
|
| 8 |
+
| 'unknown_gan'
|
| 9 |
+
| 'stable_diffusion'
|
| 10 |
+
| 'midjourney'
|
| 11 |
+
| 'dall_e'
|
| 12 |
+
| 'flux'
|
| 13 |
+
| 'firefly'
|
| 14 |
+
| 'imagen'
|
| 15 |
+
| 'sora'
|
| 16 |
+
|
| 17 |
+
export interface EngineResult {
|
| 18 |
+
engine: string
|
| 19 |
+
verdict: Verdict
|
| 20 |
+
confidence: number
|
| 21 |
+
attributed_generator?: GeneratorLabel | string | null
|
| 22 |
+
explanation: string
|
| 23 |
+
processing_time_ms: number
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
export interface DetectionResponse {
|
| 27 |
+
verdict: Verdict
|
| 28 |
+
confidence: number
|
| 29 |
+
attributed_generator: GeneratorLabel | string
|
| 30 |
+
explanation: string
|
| 31 |
+
processing_time_ms: number
|
| 32 |
+
engine_breakdown: EngineResult[]
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
export type MediaType = 'image' | 'video'
|
| 36 |
+
|
| 37 |
+
export interface AnalysisState {
|
| 38 |
+
status: 'idle' | 'uploading' | 'analysing' | 'done' | 'error'
|
| 39 |
+
file?: File
|
| 40 |
+
preview?: string
|
| 41 |
+
result?: DetectionResponse
|
| 42 |
+
error?: string
|
| 43 |
+
}
|
frontend/types/framer-motion.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
declare module 'framer-motion';
|
package-lock.json
ADDED
|
@@ -0,0 +1,1268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "genai-deepdetect",
|
| 3 |
+
"lockfileVersion": 3,
|
| 4 |
+
"requires": true,
|
| 5 |
+
"packages": {
|
| 6 |
+
"": {
|
| 7 |
+
"dependencies": {
|
| 8 |
+
"@upstash/context7-mcp": "^2.1.4"
|
| 9 |
+
}
|
| 10 |
+
},
|
| 11 |
+
"node_modules/@hono/node-server": {
|
| 12 |
+
"version": "1.19.11",
|
| 13 |
+
"resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.11.tgz",
|
| 14 |
+
"integrity": "sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g==",
|
| 15 |
+
"license": "MIT",
|
| 16 |
+
"engines": {
|
| 17 |
+
"node": ">=18.14.1"
|
| 18 |
+
},
|
| 19 |
+
"peerDependencies": {
|
| 20 |
+
"hono": "^4"
|
| 21 |
+
}
|
| 22 |
+
},
|
| 23 |
+
"node_modules/@modelcontextprotocol/sdk": {
|
| 24 |
+
"version": "1.28.0",
|
| 25 |
+
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.28.0.tgz",
|
| 26 |
+
"integrity": "sha512-gmloF+i+flI8ouQK7MWW4mOwuMh4RePBuPFAEPC6+pdqyWOUMDOixb6qZ69owLJpz6XmyllCouc4t8YWO+E2Nw==",
|
| 27 |
+
"license": "MIT",
|
| 28 |
+
"dependencies": {
|
| 29 |
+
"@hono/node-server": "^1.19.9",
|
| 30 |
+
"ajv": "^8.17.1",
|
| 31 |
+
"ajv-formats": "^3.0.1",
|
| 32 |
+
"content-type": "^1.0.5",
|
| 33 |
+
"cors": "^2.8.5",
|
| 34 |
+
"cross-spawn": "^7.0.5",
|
| 35 |
+
"eventsource": "^3.0.2",
|
| 36 |
+
"eventsource-parser": "^3.0.0",
|
| 37 |
+
"express": "^5.2.1",
|
| 38 |
+
"express-rate-limit": "^8.2.1",
|
| 39 |
+
"hono": "^4.11.4",
|
| 40 |
+
"jose": "^6.1.3",
|
| 41 |
+
"json-schema-typed": "^8.0.2",
|
| 42 |
+
"pkce-challenge": "^5.0.0",
|
| 43 |
+
"raw-body": "^3.0.0",
|
| 44 |
+
"zod": "^3.25 || ^4.0",
|
| 45 |
+
"zod-to-json-schema": "^3.25.1"
|
| 46 |
+
},
|
| 47 |
+
"engines": {
|
| 48 |
+
"node": ">=18"
|
| 49 |
+
},
|
| 50 |
+
"peerDependencies": {
|
| 51 |
+
"@cfworker/json-schema": "^4.1.1",
|
| 52 |
+
"zod": "^3.25 || ^4.0"
|
| 53 |
+
},
|
| 54 |
+
"peerDependenciesMeta": {
|
| 55 |
+
"@cfworker/json-schema": {
|
| 56 |
+
"optional": true
|
| 57 |
+
},
|
| 58 |
+
"zod": {
|
| 59 |
+
"optional": false
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
},
|
| 63 |
+
"node_modules/@types/body-parser": {
|
| 64 |
+
"version": "1.19.6",
|
| 65 |
+
"resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz",
|
| 66 |
+
"integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==",
|
| 67 |
+
"license": "MIT",
|
| 68 |
+
"dependencies": {
|
| 69 |
+
"@types/connect": "*",
|
| 70 |
+
"@types/node": "*"
|
| 71 |
+
}
|
| 72 |
+
},
|
| 73 |
+
"node_modules/@types/connect": {
|
| 74 |
+
"version": "3.4.38",
|
| 75 |
+
"resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz",
|
| 76 |
+
"integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==",
|
| 77 |
+
"license": "MIT",
|
| 78 |
+
"dependencies": {
|
| 79 |
+
"@types/node": "*"
|
| 80 |
+
}
|
| 81 |
+
},
|
| 82 |
+
"node_modules/@types/express": {
|
| 83 |
+
"version": "5.0.6",
|
| 84 |
+
"resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz",
|
| 85 |
+
"integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==",
|
| 86 |
+
"license": "MIT",
|
| 87 |
+
"dependencies": {
|
| 88 |
+
"@types/body-parser": "*",
|
| 89 |
+
"@types/express-serve-static-core": "^5.0.0",
|
| 90 |
+
"@types/serve-static": "^2"
|
| 91 |
+
}
|
| 92 |
+
},
|
| 93 |
+
"node_modules/@types/express-serve-static-core": {
|
| 94 |
+
"version": "5.1.1",
|
| 95 |
+
"resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.1.tgz",
|
| 96 |
+
"integrity": "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==",
|
| 97 |
+
"license": "MIT",
|
| 98 |
+
"dependencies": {
|
| 99 |
+
"@types/node": "*",
|
| 100 |
+
"@types/qs": "*",
|
| 101 |
+
"@types/range-parser": "*",
|
| 102 |
+
"@types/send": "*"
|
| 103 |
+
}
|
| 104 |
+
},
|
| 105 |
+
"node_modules/@types/http-errors": {
|
| 106 |
+
"version": "2.0.5",
|
| 107 |
+
"resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz",
|
| 108 |
+
"integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==",
|
| 109 |
+
"license": "MIT"
|
| 110 |
+
},
|
| 111 |
+
"node_modules/@types/node": {
|
| 112 |
+
"version": "25.5.0",
|
| 113 |
+
"resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.0.tgz",
|
| 114 |
+
"integrity": "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==",
|
| 115 |
+
"license": "MIT",
|
| 116 |
+
"dependencies": {
|
| 117 |
+
"undici-types": "~7.18.0"
|
| 118 |
+
}
|
| 119 |
+
},
|
| 120 |
+
"node_modules/@types/qs": {
|
| 121 |
+
"version": "6.15.0",
|
| 122 |
+
"resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.15.0.tgz",
|
| 123 |
+
"integrity": "sha512-JawvT8iBVWpzTrz3EGw9BTQFg3BQNmwERdKE22vlTxawwtbyUSlMppvZYKLZzB5zgACXdXxbD3m1bXaMqP/9ow==",
|
| 124 |
+
"license": "MIT"
|
| 125 |
+
},
|
| 126 |
+
"node_modules/@types/range-parser": {
|
| 127 |
+
"version": "1.2.7",
|
| 128 |
+
"resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz",
|
| 129 |
+
"integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==",
|
| 130 |
+
"license": "MIT"
|
| 131 |
+
},
|
| 132 |
+
"node_modules/@types/send": {
|
| 133 |
+
"version": "1.2.1",
|
| 134 |
+
"resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz",
|
| 135 |
+
"integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==",
|
| 136 |
+
"license": "MIT",
|
| 137 |
+
"dependencies": {
|
| 138 |
+
"@types/node": "*"
|
| 139 |
+
}
|
| 140 |
+
},
|
| 141 |
+
"node_modules/@types/serve-static": {
|
| 142 |
+
"version": "2.2.0",
|
| 143 |
+
"resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-2.2.0.tgz",
|
| 144 |
+
"integrity": "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==",
|
| 145 |
+
"license": "MIT",
|
| 146 |
+
"dependencies": {
|
| 147 |
+
"@types/http-errors": "*",
|
| 148 |
+
"@types/node": "*"
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"node_modules/@upstash/context7-mcp": {
|
| 152 |
+
"version": "2.1.4",
|
| 153 |
+
"resolved": "https://registry.npmjs.org/@upstash/context7-mcp/-/context7-mcp-2.1.4.tgz",
|
| 154 |
+
"integrity": "sha512-PZ8ZgA5/tOshHi/7bQxAhbwukJUjmJvFmBtciH7P5IG4WirLEPo+8ieQgCrvwgPkWG5btym9a70iNVTL3IiF3Q==",
|
| 155 |
+
"license": "MIT",
|
| 156 |
+
"dependencies": {
|
| 157 |
+
"@modelcontextprotocol/sdk": "^1.25.1",
|
| 158 |
+
"@types/express": "^5.0.4",
|
| 159 |
+
"commander": "^14.0.0",
|
| 160 |
+
"express": "^5.1.0",
|
| 161 |
+
"jose": "^6.1.3",
|
| 162 |
+
"undici": "^6.6.3",
|
| 163 |
+
"zod": "^4.3.4"
|
| 164 |
+
},
|
| 165 |
+
"bin": {
|
| 166 |
+
"context7-mcp": "dist/index.js"
|
| 167 |
+
}
|
| 168 |
+
},
|
| 169 |
+
"node_modules/accepts": {
|
| 170 |
+
"version": "2.0.0",
|
| 171 |
+
"resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
|
| 172 |
+
"integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==",
|
| 173 |
+
"license": "MIT",
|
| 174 |
+
"dependencies": {
|
| 175 |
+
"mime-types": "^3.0.0",
|
| 176 |
+
"negotiator": "^1.0.0"
|
| 177 |
+
},
|
| 178 |
+
"engines": {
|
| 179 |
+
"node": ">= 0.6"
|
| 180 |
+
}
|
| 181 |
+
},
|
| 182 |
+
"node_modules/ajv": {
|
| 183 |
+
"version": "8.18.0",
|
| 184 |
+
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
|
| 185 |
+
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
|
| 186 |
+
"license": "MIT",
|
| 187 |
+
"dependencies": {
|
| 188 |
+
"fast-deep-equal": "^3.1.3",
|
| 189 |
+
"fast-uri": "^3.0.1",
|
| 190 |
+
"json-schema-traverse": "^1.0.0",
|
| 191 |
+
"require-from-string": "^2.0.2"
|
| 192 |
+
},
|
| 193 |
+
"funding": {
|
| 194 |
+
"type": "github",
|
| 195 |
+
"url": "https://github.com/sponsors/epoberezkin"
|
| 196 |
+
}
|
| 197 |
+
},
|
| 198 |
+
"node_modules/ajv-formats": {
|
| 199 |
+
"version": "3.0.1",
|
| 200 |
+
"resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz",
|
| 201 |
+
"integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==",
|
| 202 |
+
"license": "MIT",
|
| 203 |
+
"dependencies": {
|
| 204 |
+
"ajv": "^8.0.0"
|
| 205 |
+
},
|
| 206 |
+
"peerDependencies": {
|
| 207 |
+
"ajv": "^8.0.0"
|
| 208 |
+
},
|
| 209 |
+
"peerDependenciesMeta": {
|
| 210 |
+
"ajv": {
|
| 211 |
+
"optional": true
|
| 212 |
+
}
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
"node_modules/body-parser": {
|
| 216 |
+
"version": "2.2.2",
|
| 217 |
+
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.2.tgz",
|
| 218 |
+
"integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==",
|
| 219 |
+
"license": "MIT",
|
| 220 |
+
"dependencies": {
|
| 221 |
+
"bytes": "^3.1.2",
|
| 222 |
+
"content-type": "^1.0.5",
|
| 223 |
+
"debug": "^4.4.3",
|
| 224 |
+
"http-errors": "^2.0.0",
|
| 225 |
+
"iconv-lite": "^0.7.0",
|
| 226 |
+
"on-finished": "^2.4.1",
|
| 227 |
+
"qs": "^6.14.1",
|
| 228 |
+
"raw-body": "^3.0.1",
|
| 229 |
+
"type-is": "^2.0.1"
|
| 230 |
+
},
|
| 231 |
+
"engines": {
|
| 232 |
+
"node": ">=18"
|
| 233 |
+
},
|
| 234 |
+
"funding": {
|
| 235 |
+
"type": "opencollective",
|
| 236 |
+
"url": "https://opencollective.com/express"
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
"node_modules/bytes": {
|
| 240 |
+
"version": "3.1.2",
|
| 241 |
+
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
|
| 242 |
+
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
|
| 243 |
+
"license": "MIT",
|
| 244 |
+
"engines": {
|
| 245 |
+
"node": ">= 0.8"
|
| 246 |
+
}
|
| 247 |
+
},
|
| 248 |
+
"node_modules/call-bind-apply-helpers": {
|
| 249 |
+
"version": "1.0.2",
|
| 250 |
+
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
|
| 251 |
+
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
|
| 252 |
+
"license": "MIT",
|
| 253 |
+
"dependencies": {
|
| 254 |
+
"es-errors": "^1.3.0",
|
| 255 |
+
"function-bind": "^1.1.2"
|
| 256 |
+
},
|
| 257 |
+
"engines": {
|
| 258 |
+
"node": ">= 0.4"
|
| 259 |
+
}
|
| 260 |
+
},
|
| 261 |
+
"node_modules/call-bound": {
|
| 262 |
+
"version": "1.0.4",
|
| 263 |
+
"resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
|
| 264 |
+
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
|
| 265 |
+
"license": "MIT",
|
| 266 |
+
"dependencies": {
|
| 267 |
+
"call-bind-apply-helpers": "^1.0.2",
|
| 268 |
+
"get-intrinsic": "^1.3.0"
|
| 269 |
+
},
|
| 270 |
+
"engines": {
|
| 271 |
+
"node": ">= 0.4"
|
| 272 |
+
},
|
| 273 |
+
"funding": {
|
| 274 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 275 |
+
}
|
| 276 |
+
},
|
| 277 |
+
"node_modules/commander": {
|
| 278 |
+
"version": "14.0.3",
|
| 279 |
+
"resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz",
|
| 280 |
+
"integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==",
|
| 281 |
+
"license": "MIT",
|
| 282 |
+
"engines": {
|
| 283 |
+
"node": ">=20"
|
| 284 |
+
}
|
| 285 |
+
},
|
| 286 |
+
"node_modules/content-disposition": {
|
| 287 |
+
"version": "1.0.1",
|
| 288 |
+
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz",
|
| 289 |
+
"integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==",
|
| 290 |
+
"license": "MIT",
|
| 291 |
+
"engines": {
|
| 292 |
+
"node": ">=18"
|
| 293 |
+
},
|
| 294 |
+
"funding": {
|
| 295 |
+
"type": "opencollective",
|
| 296 |
+
"url": "https://opencollective.com/express"
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
"node_modules/content-type": {
|
| 300 |
+
"version": "1.0.5",
|
| 301 |
+
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
|
| 302 |
+
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
|
| 303 |
+
"license": "MIT",
|
| 304 |
+
"engines": {
|
| 305 |
+
"node": ">= 0.6"
|
| 306 |
+
}
|
| 307 |
+
},
|
| 308 |
+
"node_modules/cookie": {
|
| 309 |
+
"version": "0.7.2",
|
| 310 |
+
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
|
| 311 |
+
"integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
|
| 312 |
+
"license": "MIT",
|
| 313 |
+
"engines": {
|
| 314 |
+
"node": ">= 0.6"
|
| 315 |
+
}
|
| 316 |
+
},
|
| 317 |
+
"node_modules/cookie-signature": {
|
| 318 |
+
"version": "1.2.2",
|
| 319 |
+
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz",
|
| 320 |
+
"integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==",
|
| 321 |
+
"license": "MIT",
|
| 322 |
+
"engines": {
|
| 323 |
+
"node": ">=6.6.0"
|
| 324 |
+
}
|
| 325 |
+
},
|
| 326 |
+
"node_modules/cors": {
|
| 327 |
+
"version": "2.8.6",
|
| 328 |
+
"resolved": "https://registry.npmjs.org/cors/-/cors-2.8.6.tgz",
|
| 329 |
+
"integrity": "sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==",
|
| 330 |
+
"license": "MIT",
|
| 331 |
+
"dependencies": {
|
| 332 |
+
"object-assign": "^4",
|
| 333 |
+
"vary": "^1"
|
| 334 |
+
},
|
| 335 |
+
"engines": {
|
| 336 |
+
"node": ">= 0.10"
|
| 337 |
+
},
|
| 338 |
+
"funding": {
|
| 339 |
+
"type": "opencollective",
|
| 340 |
+
"url": "https://opencollective.com/express"
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
"node_modules/cross-spawn": {
|
| 344 |
+
"version": "7.0.6",
|
| 345 |
+
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
| 346 |
+
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
|
| 347 |
+
"license": "MIT",
|
| 348 |
+
"dependencies": {
|
| 349 |
+
"path-key": "^3.1.0",
|
| 350 |
+
"shebang-command": "^2.0.0",
|
| 351 |
+
"which": "^2.0.1"
|
| 352 |
+
},
|
| 353 |
+
"engines": {
|
| 354 |
+
"node": ">= 8"
|
| 355 |
+
}
|
| 356 |
+
},
|
| 357 |
+
"node_modules/debug": {
|
| 358 |
+
"version": "4.4.3",
|
| 359 |
+
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
|
| 360 |
+
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
|
| 361 |
+
"license": "MIT",
|
| 362 |
+
"dependencies": {
|
| 363 |
+
"ms": "^2.1.3"
|
| 364 |
+
},
|
| 365 |
+
"engines": {
|
| 366 |
+
"node": ">=6.0"
|
| 367 |
+
},
|
| 368 |
+
"peerDependenciesMeta": {
|
| 369 |
+
"supports-color": {
|
| 370 |
+
"optional": true
|
| 371 |
+
}
|
| 372 |
+
}
|
| 373 |
+
},
|
| 374 |
+
"node_modules/depd": {
|
| 375 |
+
"version": "2.0.0",
|
| 376 |
+
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
|
| 377 |
+
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
|
| 378 |
+
"license": "MIT",
|
| 379 |
+
"engines": {
|
| 380 |
+
"node": ">= 0.8"
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
"node_modules/dunder-proto": {
|
| 384 |
+
"version": "1.0.1",
|
| 385 |
+
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
|
| 386 |
+
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
|
| 387 |
+
"license": "MIT",
|
| 388 |
+
"dependencies": {
|
| 389 |
+
"call-bind-apply-helpers": "^1.0.1",
|
| 390 |
+
"es-errors": "^1.3.0",
|
| 391 |
+
"gopd": "^1.2.0"
|
| 392 |
+
},
|
| 393 |
+
"engines": {
|
| 394 |
+
"node": ">= 0.4"
|
| 395 |
+
}
|
| 396 |
+
},
|
| 397 |
+
"node_modules/ee-first": {
|
| 398 |
+
"version": "1.1.1",
|
| 399 |
+
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
|
| 400 |
+
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
|
| 401 |
+
"license": "MIT"
|
| 402 |
+
},
|
| 403 |
+
"node_modules/encodeurl": {
|
| 404 |
+
"version": "2.0.0",
|
| 405 |
+
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
|
| 406 |
+
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
|
| 407 |
+
"license": "MIT",
|
| 408 |
+
"engines": {
|
| 409 |
+
"node": ">= 0.8"
|
| 410 |
+
}
|
| 411 |
+
},
|
| 412 |
+
"node_modules/es-define-property": {
|
| 413 |
+
"version": "1.0.1",
|
| 414 |
+
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
|
| 415 |
+
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
|
| 416 |
+
"license": "MIT",
|
| 417 |
+
"engines": {
|
| 418 |
+
"node": ">= 0.4"
|
| 419 |
+
}
|
| 420 |
+
},
|
| 421 |
+
"node_modules/es-errors": {
|
| 422 |
+
"version": "1.3.0",
|
| 423 |
+
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
| 424 |
+
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
|
| 425 |
+
"license": "MIT",
|
| 426 |
+
"engines": {
|
| 427 |
+
"node": ">= 0.4"
|
| 428 |
+
}
|
| 429 |
+
},
|
| 430 |
+
"node_modules/es-object-atoms": {
|
| 431 |
+
"version": "1.1.1",
|
| 432 |
+
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
|
| 433 |
+
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
|
| 434 |
+
"license": "MIT",
|
| 435 |
+
"dependencies": {
|
| 436 |
+
"es-errors": "^1.3.0"
|
| 437 |
+
},
|
| 438 |
+
"engines": {
|
| 439 |
+
"node": ">= 0.4"
|
| 440 |
+
}
|
| 441 |
+
},
|
| 442 |
+
"node_modules/escape-html": {
|
| 443 |
+
"version": "1.0.3",
|
| 444 |
+
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
|
| 445 |
+
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
|
| 446 |
+
"license": "MIT"
|
| 447 |
+
},
|
| 448 |
+
"node_modules/etag": {
|
| 449 |
+
"version": "1.8.1",
|
| 450 |
+
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
|
| 451 |
+
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
|
| 452 |
+
"license": "MIT",
|
| 453 |
+
"engines": {
|
| 454 |
+
"node": ">= 0.6"
|
| 455 |
+
}
|
| 456 |
+
},
|
| 457 |
+
"node_modules/eventsource": {
|
| 458 |
+
"version": "3.0.7",
|
| 459 |
+
"resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz",
|
| 460 |
+
"integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==",
|
| 461 |
+
"license": "MIT",
|
| 462 |
+
"dependencies": {
|
| 463 |
+
"eventsource-parser": "^3.0.1"
|
| 464 |
+
},
|
| 465 |
+
"engines": {
|
| 466 |
+
"node": ">=18.0.0"
|
| 467 |
+
}
|
| 468 |
+
},
|
| 469 |
+
"node_modules/eventsource-parser": {
|
| 470 |
+
"version": "3.0.6",
|
| 471 |
+
"resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.6.tgz",
|
| 472 |
+
"integrity": "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==",
|
| 473 |
+
"license": "MIT",
|
| 474 |
+
"engines": {
|
| 475 |
+
"node": ">=18.0.0"
|
| 476 |
+
}
|
| 477 |
+
},
|
| 478 |
+
"node_modules/express": {
|
| 479 |
+
"version": "5.2.1",
|
| 480 |
+
"resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz",
|
| 481 |
+
"integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==",
|
| 482 |
+
"license": "MIT",
|
| 483 |
+
"peer": true,
|
| 484 |
+
"dependencies": {
|
| 485 |
+
"accepts": "^2.0.0",
|
| 486 |
+
"body-parser": "^2.2.1",
|
| 487 |
+
"content-disposition": "^1.0.0",
|
| 488 |
+
"content-type": "^1.0.5",
|
| 489 |
+
"cookie": "^0.7.1",
|
| 490 |
+
"cookie-signature": "^1.2.1",
|
| 491 |
+
"debug": "^4.4.0",
|
| 492 |
+
"depd": "^2.0.0",
|
| 493 |
+
"encodeurl": "^2.0.0",
|
| 494 |
+
"escape-html": "^1.0.3",
|
| 495 |
+
"etag": "^1.8.1",
|
| 496 |
+
"finalhandler": "^2.1.0",
|
| 497 |
+
"fresh": "^2.0.0",
|
| 498 |
+
"http-errors": "^2.0.0",
|
| 499 |
+
"merge-descriptors": "^2.0.0",
|
| 500 |
+
"mime-types": "^3.0.0",
|
| 501 |
+
"on-finished": "^2.4.1",
|
| 502 |
+
"once": "^1.4.0",
|
| 503 |
+
"parseurl": "^1.3.3",
|
| 504 |
+
"proxy-addr": "^2.0.7",
|
| 505 |
+
"qs": "^6.14.0",
|
| 506 |
+
"range-parser": "^1.2.1",
|
| 507 |
+
"router": "^2.2.0",
|
| 508 |
+
"send": "^1.1.0",
|
| 509 |
+
"serve-static": "^2.2.0",
|
| 510 |
+
"statuses": "^2.0.1",
|
| 511 |
+
"type-is": "^2.0.1",
|
| 512 |
+
"vary": "^1.1.2"
|
| 513 |
+
},
|
| 514 |
+
"engines": {
|
| 515 |
+
"node": ">= 18"
|
| 516 |
+
},
|
| 517 |
+
"funding": {
|
| 518 |
+
"type": "opencollective",
|
| 519 |
+
"url": "https://opencollective.com/express"
|
| 520 |
+
}
|
| 521 |
+
},
|
| 522 |
+
"node_modules/express-rate-limit": {
|
| 523 |
+
"version": "8.3.1",
|
| 524 |
+
"resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.3.1.tgz",
|
| 525 |
+
"integrity": "sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==",
|
| 526 |
+
"license": "MIT",
|
| 527 |
+
"dependencies": {
|
| 528 |
+
"ip-address": "10.1.0"
|
| 529 |
+
},
|
| 530 |
+
"engines": {
|
| 531 |
+
"node": ">= 16"
|
| 532 |
+
},
|
| 533 |
+
"funding": {
|
| 534 |
+
"url": "https://github.com/sponsors/express-rate-limit"
|
| 535 |
+
},
|
| 536 |
+
"peerDependencies": {
|
| 537 |
+
"express": ">= 4.11"
|
| 538 |
+
}
|
| 539 |
+
},
|
| 540 |
+
"node_modules/fast-deep-equal": {
|
| 541 |
+
"version": "3.1.3",
|
| 542 |
+
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
|
| 543 |
+
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
| 544 |
+
"license": "MIT"
|
| 545 |
+
},
|
| 546 |
+
"node_modules/fast-uri": {
|
| 547 |
+
"version": "3.1.0",
|
| 548 |
+
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
| 549 |
+
"integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
|
| 550 |
+
"funding": [
|
| 551 |
+
{
|
| 552 |
+
"type": "github",
|
| 553 |
+
"url": "https://github.com/sponsors/fastify"
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"type": "opencollective",
|
| 557 |
+
"url": "https://opencollective.com/fastify"
|
| 558 |
+
}
|
| 559 |
+
],
|
| 560 |
+
"license": "BSD-3-Clause"
|
| 561 |
+
},
|
| 562 |
+
"node_modules/finalhandler": {
|
| 563 |
+
"version": "2.1.1",
|
| 564 |
+
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz",
|
| 565 |
+
"integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==",
|
| 566 |
+
"license": "MIT",
|
| 567 |
+
"dependencies": {
|
| 568 |
+
"debug": "^4.4.0",
|
| 569 |
+
"encodeurl": "^2.0.0",
|
| 570 |
+
"escape-html": "^1.0.3",
|
| 571 |
+
"on-finished": "^2.4.1",
|
| 572 |
+
"parseurl": "^1.3.3",
|
| 573 |
+
"statuses": "^2.0.1"
|
| 574 |
+
},
|
| 575 |
+
"engines": {
|
| 576 |
+
"node": ">= 18.0.0"
|
| 577 |
+
},
|
| 578 |
+
"funding": {
|
| 579 |
+
"type": "opencollective",
|
| 580 |
+
"url": "https://opencollective.com/express"
|
| 581 |
+
}
|
| 582 |
+
},
|
| 583 |
+
"node_modules/forwarded": {
|
| 584 |
+
"version": "0.2.0",
|
| 585 |
+
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
|
| 586 |
+
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
|
| 587 |
+
"license": "MIT",
|
| 588 |
+
"engines": {
|
| 589 |
+
"node": ">= 0.6"
|
| 590 |
+
}
|
| 591 |
+
},
|
| 592 |
+
"node_modules/fresh": {
|
| 593 |
+
"version": "2.0.0",
|
| 594 |
+
"resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz",
|
| 595 |
+
"integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==",
|
| 596 |
+
"license": "MIT",
|
| 597 |
+
"engines": {
|
| 598 |
+
"node": ">= 0.8"
|
| 599 |
+
}
|
| 600 |
+
},
|
| 601 |
+
"node_modules/function-bind": {
|
| 602 |
+
"version": "1.1.2",
|
| 603 |
+
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
| 604 |
+
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
|
| 605 |
+
"license": "MIT",
|
| 606 |
+
"funding": {
|
| 607 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 608 |
+
}
|
| 609 |
+
},
|
| 610 |
+
"node_modules/get-intrinsic": {
|
| 611 |
+
"version": "1.3.0",
|
| 612 |
+
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
|
| 613 |
+
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
|
| 614 |
+
"license": "MIT",
|
| 615 |
+
"dependencies": {
|
| 616 |
+
"call-bind-apply-helpers": "^1.0.2",
|
| 617 |
+
"es-define-property": "^1.0.1",
|
| 618 |
+
"es-errors": "^1.3.0",
|
| 619 |
+
"es-object-atoms": "^1.1.1",
|
| 620 |
+
"function-bind": "^1.1.2",
|
| 621 |
+
"get-proto": "^1.0.1",
|
| 622 |
+
"gopd": "^1.2.0",
|
| 623 |
+
"has-symbols": "^1.1.0",
|
| 624 |
+
"hasown": "^2.0.2",
|
| 625 |
+
"math-intrinsics": "^1.1.0"
|
| 626 |
+
},
|
| 627 |
+
"engines": {
|
| 628 |
+
"node": ">= 0.4"
|
| 629 |
+
},
|
| 630 |
+
"funding": {
|
| 631 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 632 |
+
}
|
| 633 |
+
},
|
| 634 |
+
"node_modules/get-proto": {
|
| 635 |
+
"version": "1.0.1",
|
| 636 |
+
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
|
| 637 |
+
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
|
| 638 |
+
"license": "MIT",
|
| 639 |
+
"dependencies": {
|
| 640 |
+
"dunder-proto": "^1.0.1",
|
| 641 |
+
"es-object-atoms": "^1.0.0"
|
| 642 |
+
},
|
| 643 |
+
"engines": {
|
| 644 |
+
"node": ">= 0.4"
|
| 645 |
+
}
|
| 646 |
+
},
|
| 647 |
+
"node_modules/gopd": {
|
| 648 |
+
"version": "1.2.0",
|
| 649 |
+
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
|
| 650 |
+
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
|
| 651 |
+
"license": "MIT",
|
| 652 |
+
"engines": {
|
| 653 |
+
"node": ">= 0.4"
|
| 654 |
+
},
|
| 655 |
+
"funding": {
|
| 656 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 657 |
+
}
|
| 658 |
+
},
|
| 659 |
+
"node_modules/has-symbols": {
|
| 660 |
+
"version": "1.1.0",
|
| 661 |
+
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
|
| 662 |
+
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
|
| 663 |
+
"license": "MIT",
|
| 664 |
+
"engines": {
|
| 665 |
+
"node": ">= 0.4"
|
| 666 |
+
},
|
| 667 |
+
"funding": {
|
| 668 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 669 |
+
}
|
| 670 |
+
},
|
| 671 |
+
"node_modules/hasown": {
|
| 672 |
+
"version": "2.0.2",
|
| 673 |
+
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
| 674 |
+
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
| 675 |
+
"license": "MIT",
|
| 676 |
+
"dependencies": {
|
| 677 |
+
"function-bind": "^1.1.2"
|
| 678 |
+
},
|
| 679 |
+
"engines": {
|
| 680 |
+
"node": ">= 0.4"
|
| 681 |
+
}
|
| 682 |
+
},
|
| 683 |
+
"node_modules/hono": {
|
| 684 |
+
"version": "4.12.9",
|
| 685 |
+
"resolved": "https://registry.npmjs.org/hono/-/hono-4.12.9.tgz",
|
| 686 |
+
"integrity": "sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA==",
|
| 687 |
+
"license": "MIT",
|
| 688 |
+
"peer": true,
|
| 689 |
+
"engines": {
|
| 690 |
+
"node": ">=16.9.0"
|
| 691 |
+
}
|
| 692 |
+
},
|
| 693 |
+
"node_modules/http-errors": {
|
| 694 |
+
"version": "2.0.1",
|
| 695 |
+
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
|
| 696 |
+
"integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
|
| 697 |
+
"license": "MIT",
|
| 698 |
+
"dependencies": {
|
| 699 |
+
"depd": "~2.0.0",
|
| 700 |
+
"inherits": "~2.0.4",
|
| 701 |
+
"setprototypeof": "~1.2.0",
|
| 702 |
+
"statuses": "~2.0.2",
|
| 703 |
+
"toidentifier": "~1.0.1"
|
| 704 |
+
},
|
| 705 |
+
"engines": {
|
| 706 |
+
"node": ">= 0.8"
|
| 707 |
+
},
|
| 708 |
+
"funding": {
|
| 709 |
+
"type": "opencollective",
|
| 710 |
+
"url": "https://opencollective.com/express"
|
| 711 |
+
}
|
| 712 |
+
},
|
| 713 |
+
"node_modules/iconv-lite": {
|
| 714 |
+
"version": "0.7.2",
|
| 715 |
+
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz",
|
| 716 |
+
"integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==",
|
| 717 |
+
"license": "MIT",
|
| 718 |
+
"dependencies": {
|
| 719 |
+
"safer-buffer": ">= 2.1.2 < 3.0.0"
|
| 720 |
+
},
|
| 721 |
+
"engines": {
|
| 722 |
+
"node": ">=0.10.0"
|
| 723 |
+
},
|
| 724 |
+
"funding": {
|
| 725 |
+
"type": "opencollective",
|
| 726 |
+
"url": "https://opencollective.com/express"
|
| 727 |
+
}
|
| 728 |
+
},
|
| 729 |
+
"node_modules/inherits": {
|
| 730 |
+
"version": "2.0.4",
|
| 731 |
+
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
| 732 |
+
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
|
| 733 |
+
"license": "ISC"
|
| 734 |
+
},
|
| 735 |
+
"node_modules/ip-address": {
|
| 736 |
+
"version": "10.1.0",
|
| 737 |
+
"resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz",
|
| 738 |
+
"integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==",
|
| 739 |
+
"license": "MIT",
|
| 740 |
+
"engines": {
|
| 741 |
+
"node": ">= 12"
|
| 742 |
+
}
|
| 743 |
+
},
|
| 744 |
+
"node_modules/ipaddr.js": {
|
| 745 |
+
"version": "1.9.1",
|
| 746 |
+
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
|
| 747 |
+
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
|
| 748 |
+
"license": "MIT",
|
| 749 |
+
"engines": {
|
| 750 |
+
"node": ">= 0.10"
|
| 751 |
+
}
|
| 752 |
+
},
|
| 753 |
+
"node_modules/is-promise": {
|
| 754 |
+
"version": "4.0.0",
|
| 755 |
+
"resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz",
|
| 756 |
+
"integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==",
|
| 757 |
+
"license": "MIT"
|
| 758 |
+
},
|
| 759 |
+
"node_modules/isexe": {
|
| 760 |
+
"version": "2.0.0",
|
| 761 |
+
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
|
| 762 |
+
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
|
| 763 |
+
"license": "ISC"
|
| 764 |
+
},
|
| 765 |
+
"node_modules/jose": {
|
| 766 |
+
"version": "6.2.2",
|
| 767 |
+
"resolved": "https://registry.npmjs.org/jose/-/jose-6.2.2.tgz",
|
| 768 |
+
"integrity": "sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ==",
|
| 769 |
+
"license": "MIT",
|
| 770 |
+
"funding": {
|
| 771 |
+
"url": "https://github.com/sponsors/panva"
|
| 772 |
+
}
|
| 773 |
+
},
|
| 774 |
+
"node_modules/json-schema-traverse": {
|
| 775 |
+
"version": "1.0.0",
|
| 776 |
+
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
|
| 777 |
+
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
|
| 778 |
+
"license": "MIT"
|
| 779 |
+
},
|
| 780 |
+
"node_modules/json-schema-typed": {
|
| 781 |
+
"version": "8.0.2",
|
| 782 |
+
"resolved": "https://registry.npmjs.org/json-schema-typed/-/json-schema-typed-8.0.2.tgz",
|
| 783 |
+
"integrity": "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==",
|
| 784 |
+
"license": "BSD-2-Clause"
|
| 785 |
+
},
|
| 786 |
+
"node_modules/math-intrinsics": {
|
| 787 |
+
"version": "1.1.0",
|
| 788 |
+
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
|
| 789 |
+
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
|
| 790 |
+
"license": "MIT",
|
| 791 |
+
"engines": {
|
| 792 |
+
"node": ">= 0.4"
|
| 793 |
+
}
|
| 794 |
+
},
|
| 795 |
+
"node_modules/media-typer": {
|
| 796 |
+
"version": "1.1.0",
|
| 797 |
+
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz",
|
| 798 |
+
"integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==",
|
| 799 |
+
"license": "MIT",
|
| 800 |
+
"engines": {
|
| 801 |
+
"node": ">= 0.8"
|
| 802 |
+
}
|
| 803 |
+
},
|
| 804 |
+
"node_modules/merge-descriptors": {
|
| 805 |
+
"version": "2.0.0",
|
| 806 |
+
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz",
|
| 807 |
+
"integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==",
|
| 808 |
+
"license": "MIT",
|
| 809 |
+
"engines": {
|
| 810 |
+
"node": ">=18"
|
| 811 |
+
},
|
| 812 |
+
"funding": {
|
| 813 |
+
"url": "https://github.com/sponsors/sindresorhus"
|
| 814 |
+
}
|
| 815 |
+
},
|
| 816 |
+
"node_modules/mime-db": {
|
| 817 |
+
"version": "1.54.0",
|
| 818 |
+
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
|
| 819 |
+
"integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==",
|
| 820 |
+
"license": "MIT",
|
| 821 |
+
"engines": {
|
| 822 |
+
"node": ">= 0.6"
|
| 823 |
+
}
|
| 824 |
+
},
|
| 825 |
+
"node_modules/mime-types": {
|
| 826 |
+
"version": "3.0.2",
|
| 827 |
+
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz",
|
| 828 |
+
"integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==",
|
| 829 |
+
"license": "MIT",
|
| 830 |
+
"dependencies": {
|
| 831 |
+
"mime-db": "^1.54.0"
|
| 832 |
+
},
|
| 833 |
+
"engines": {
|
| 834 |
+
"node": ">=18"
|
| 835 |
+
},
|
| 836 |
+
"funding": {
|
| 837 |
+
"type": "opencollective",
|
| 838 |
+
"url": "https://opencollective.com/express"
|
| 839 |
+
}
|
| 840 |
+
},
|
| 841 |
+
"node_modules/ms": {
|
| 842 |
+
"version": "2.1.3",
|
| 843 |
+
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
| 844 |
+
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
|
| 845 |
+
"license": "MIT"
|
| 846 |
+
},
|
| 847 |
+
"node_modules/negotiator": {
|
| 848 |
+
"version": "1.0.0",
|
| 849 |
+
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz",
|
| 850 |
+
"integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==",
|
| 851 |
+
"license": "MIT",
|
| 852 |
+
"engines": {
|
| 853 |
+
"node": ">= 0.6"
|
| 854 |
+
}
|
| 855 |
+
},
|
| 856 |
+
"node_modules/object-assign": {
|
| 857 |
+
"version": "4.1.1",
|
| 858 |
+
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
|
| 859 |
+
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
|
| 860 |
+
"license": "MIT",
|
| 861 |
+
"engines": {
|
| 862 |
+
"node": ">=0.10.0"
|
| 863 |
+
}
|
| 864 |
+
},
|
| 865 |
+
"node_modules/object-inspect": {
|
| 866 |
+
"version": "1.13.4",
|
| 867 |
+
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
|
| 868 |
+
"integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
|
| 869 |
+
"license": "MIT",
|
| 870 |
+
"engines": {
|
| 871 |
+
"node": ">= 0.4"
|
| 872 |
+
},
|
| 873 |
+
"funding": {
|
| 874 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 875 |
+
}
|
| 876 |
+
},
|
| 877 |
+
"node_modules/on-finished": {
|
| 878 |
+
"version": "2.4.1",
|
| 879 |
+
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
|
| 880 |
+
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
|
| 881 |
+
"license": "MIT",
|
| 882 |
+
"dependencies": {
|
| 883 |
+
"ee-first": "1.1.1"
|
| 884 |
+
},
|
| 885 |
+
"engines": {
|
| 886 |
+
"node": ">= 0.8"
|
| 887 |
+
}
|
| 888 |
+
},
|
| 889 |
+
"node_modules/once": {
|
| 890 |
+
"version": "1.4.0",
|
| 891 |
+
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
|
| 892 |
+
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
|
| 893 |
+
"license": "ISC",
|
| 894 |
+
"dependencies": {
|
| 895 |
+
"wrappy": "1"
|
| 896 |
+
}
|
| 897 |
+
},
|
| 898 |
+
"node_modules/parseurl": {
|
| 899 |
+
"version": "1.3.3",
|
| 900 |
+
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
|
| 901 |
+
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
|
| 902 |
+
"license": "MIT",
|
| 903 |
+
"engines": {
|
| 904 |
+
"node": ">= 0.8"
|
| 905 |
+
}
|
| 906 |
+
},
|
| 907 |
+
"node_modules/path-key": {
|
| 908 |
+
"version": "3.1.1",
|
| 909 |
+
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
|
| 910 |
+
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
|
| 911 |
+
"license": "MIT",
|
| 912 |
+
"engines": {
|
| 913 |
+
"node": ">=8"
|
| 914 |
+
}
|
| 915 |
+
},
|
| 916 |
+
"node_modules/path-to-regexp": {
|
| 917 |
+
"version": "8.3.0",
|
| 918 |
+
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz",
|
| 919 |
+
"integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==",
|
| 920 |
+
"license": "MIT",
|
| 921 |
+
"funding": {
|
| 922 |
+
"type": "opencollective",
|
| 923 |
+
"url": "https://opencollective.com/express"
|
| 924 |
+
}
|
| 925 |
+
},
|
| 926 |
+
"node_modules/pkce-challenge": {
|
| 927 |
+
"version": "5.0.1",
|
| 928 |
+
"resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.1.tgz",
|
| 929 |
+
"integrity": "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==",
|
| 930 |
+
"license": "MIT",
|
| 931 |
+
"engines": {
|
| 932 |
+
"node": ">=16.20.0"
|
| 933 |
+
}
|
| 934 |
+
},
|
| 935 |
+
"node_modules/proxy-addr": {
|
| 936 |
+
"version": "2.0.7",
|
| 937 |
+
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
| 938 |
+
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
|
| 939 |
+
"license": "MIT",
|
| 940 |
+
"dependencies": {
|
| 941 |
+
"forwarded": "0.2.0",
|
| 942 |
+
"ipaddr.js": "1.9.1"
|
| 943 |
+
},
|
| 944 |
+
"engines": {
|
| 945 |
+
"node": ">= 0.10"
|
| 946 |
+
}
|
| 947 |
+
},
|
| 948 |
+
"node_modules/qs": {
|
| 949 |
+
"version": "6.15.0",
|
| 950 |
+
"resolved": "https://registry.npmjs.org/qs/-/qs-6.15.0.tgz",
|
| 951 |
+
"integrity": "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==",
|
| 952 |
+
"license": "BSD-3-Clause",
|
| 953 |
+
"dependencies": {
|
| 954 |
+
"side-channel": "^1.1.0"
|
| 955 |
+
},
|
| 956 |
+
"engines": {
|
| 957 |
+
"node": ">=0.6"
|
| 958 |
+
},
|
| 959 |
+
"funding": {
|
| 960 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 961 |
+
}
|
| 962 |
+
},
|
| 963 |
+
"node_modules/range-parser": {
|
| 964 |
+
"version": "1.2.1",
|
| 965 |
+
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
|
| 966 |
+
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
|
| 967 |
+
"license": "MIT",
|
| 968 |
+
"engines": {
|
| 969 |
+
"node": ">= 0.6"
|
| 970 |
+
}
|
| 971 |
+
},
|
| 972 |
+
"node_modules/raw-body": {
|
| 973 |
+
"version": "3.0.2",
|
| 974 |
+
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz",
|
| 975 |
+
"integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==",
|
| 976 |
+
"license": "MIT",
|
| 977 |
+
"dependencies": {
|
| 978 |
+
"bytes": "~3.1.2",
|
| 979 |
+
"http-errors": "~2.0.1",
|
| 980 |
+
"iconv-lite": "~0.7.0",
|
| 981 |
+
"unpipe": "~1.0.0"
|
| 982 |
+
},
|
| 983 |
+
"engines": {
|
| 984 |
+
"node": ">= 0.10"
|
| 985 |
+
}
|
| 986 |
+
},
|
| 987 |
+
"node_modules/require-from-string": {
|
| 988 |
+
"version": "2.0.2",
|
| 989 |
+
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
|
| 990 |
+
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
|
| 991 |
+
"license": "MIT",
|
| 992 |
+
"engines": {
|
| 993 |
+
"node": ">=0.10.0"
|
| 994 |
+
}
|
| 995 |
+
},
|
| 996 |
+
"node_modules/router": {
|
| 997 |
+
"version": "2.2.0",
|
| 998 |
+
"resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz",
|
| 999 |
+
"integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==",
|
| 1000 |
+
"license": "MIT",
|
| 1001 |
+
"dependencies": {
|
| 1002 |
+
"debug": "^4.4.0",
|
| 1003 |
+
"depd": "^2.0.0",
|
| 1004 |
+
"is-promise": "^4.0.0",
|
| 1005 |
+
"parseurl": "^1.3.3",
|
| 1006 |
+
"path-to-regexp": "^8.0.0"
|
| 1007 |
+
},
|
| 1008 |
+
"engines": {
|
| 1009 |
+
"node": ">= 18"
|
| 1010 |
+
}
|
| 1011 |
+
},
|
| 1012 |
+
"node_modules/safer-buffer": {
|
| 1013 |
+
"version": "2.1.2",
|
| 1014 |
+
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
|
| 1015 |
+
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
|
| 1016 |
+
"license": "MIT"
|
| 1017 |
+
},
|
| 1018 |
+
"node_modules/send": {
|
| 1019 |
+
"version": "1.2.1",
|
| 1020 |
+
"resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz",
|
| 1021 |
+
"integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==",
|
| 1022 |
+
"license": "MIT",
|
| 1023 |
+
"dependencies": {
|
| 1024 |
+
"debug": "^4.4.3",
|
| 1025 |
+
"encodeurl": "^2.0.0",
|
| 1026 |
+
"escape-html": "^1.0.3",
|
| 1027 |
+
"etag": "^1.8.1",
|
| 1028 |
+
"fresh": "^2.0.0",
|
| 1029 |
+
"http-errors": "^2.0.1",
|
| 1030 |
+
"mime-types": "^3.0.2",
|
| 1031 |
+
"ms": "^2.1.3",
|
| 1032 |
+
"on-finished": "^2.4.1",
|
| 1033 |
+
"range-parser": "^1.2.1",
|
| 1034 |
+
"statuses": "^2.0.2"
|
| 1035 |
+
},
|
| 1036 |
+
"engines": {
|
| 1037 |
+
"node": ">= 18"
|
| 1038 |
+
},
|
| 1039 |
+
"funding": {
|
| 1040 |
+
"type": "opencollective",
|
| 1041 |
+
"url": "https://opencollective.com/express"
|
| 1042 |
+
}
|
| 1043 |
+
},
|
| 1044 |
+
"node_modules/serve-static": {
|
| 1045 |
+
"version": "2.2.1",
|
| 1046 |
+
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz",
|
| 1047 |
+
"integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==",
|
| 1048 |
+
"license": "MIT",
|
| 1049 |
+
"dependencies": {
|
| 1050 |
+
"encodeurl": "^2.0.0",
|
| 1051 |
+
"escape-html": "^1.0.3",
|
| 1052 |
+
"parseurl": "^1.3.3",
|
| 1053 |
+
"send": "^1.2.0"
|
| 1054 |
+
},
|
| 1055 |
+
"engines": {
|
| 1056 |
+
"node": ">= 18"
|
| 1057 |
+
},
|
| 1058 |
+
"funding": {
|
| 1059 |
+
"type": "opencollective",
|
| 1060 |
+
"url": "https://opencollective.com/express"
|
| 1061 |
+
}
|
| 1062 |
+
},
|
| 1063 |
+
"node_modules/setprototypeof": {
|
| 1064 |
+
"version": "1.2.0",
|
| 1065 |
+
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
|
| 1066 |
+
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
|
| 1067 |
+
"license": "ISC"
|
| 1068 |
+
},
|
| 1069 |
+
"node_modules/shebang-command": {
|
| 1070 |
+
"version": "2.0.0",
|
| 1071 |
+
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
| 1072 |
+
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
|
| 1073 |
+
"license": "MIT",
|
| 1074 |
+
"dependencies": {
|
| 1075 |
+
"shebang-regex": "^3.0.0"
|
| 1076 |
+
},
|
| 1077 |
+
"engines": {
|
| 1078 |
+
"node": ">=8"
|
| 1079 |
+
}
|
| 1080 |
+
},
|
| 1081 |
+
"node_modules/shebang-regex": {
|
| 1082 |
+
"version": "3.0.0",
|
| 1083 |
+
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
|
| 1084 |
+
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
|
| 1085 |
+
"license": "MIT",
|
| 1086 |
+
"engines": {
|
| 1087 |
+
"node": ">=8"
|
| 1088 |
+
}
|
| 1089 |
+
},
|
| 1090 |
+
"node_modules/side-channel": {
|
| 1091 |
+
"version": "1.1.0",
|
| 1092 |
+
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
|
| 1093 |
+
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
|
| 1094 |
+
"license": "MIT",
|
| 1095 |
+
"dependencies": {
|
| 1096 |
+
"es-errors": "^1.3.0",
|
| 1097 |
+
"object-inspect": "^1.13.3",
|
| 1098 |
+
"side-channel-list": "^1.0.0",
|
| 1099 |
+
"side-channel-map": "^1.0.1",
|
| 1100 |
+
"side-channel-weakmap": "^1.0.2"
|
| 1101 |
+
},
|
| 1102 |
+
"engines": {
|
| 1103 |
+
"node": ">= 0.4"
|
| 1104 |
+
},
|
| 1105 |
+
"funding": {
|
| 1106 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 1107 |
+
}
|
| 1108 |
+
},
|
| 1109 |
+
"node_modules/side-channel-list": {
|
| 1110 |
+
"version": "1.0.0",
|
| 1111 |
+
"resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
|
| 1112 |
+
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
|
| 1113 |
+
"license": "MIT",
|
| 1114 |
+
"dependencies": {
|
| 1115 |
+
"es-errors": "^1.3.0",
|
| 1116 |
+
"object-inspect": "^1.13.3"
|
| 1117 |
+
},
|
| 1118 |
+
"engines": {
|
| 1119 |
+
"node": ">= 0.4"
|
| 1120 |
+
},
|
| 1121 |
+
"funding": {
|
| 1122 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 1123 |
+
}
|
| 1124 |
+
},
|
| 1125 |
+
"node_modules/side-channel-map": {
|
| 1126 |
+
"version": "1.0.1",
|
| 1127 |
+
"resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
|
| 1128 |
+
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
|
| 1129 |
+
"license": "MIT",
|
| 1130 |
+
"dependencies": {
|
| 1131 |
+
"call-bound": "^1.0.2",
|
| 1132 |
+
"es-errors": "^1.3.0",
|
| 1133 |
+
"get-intrinsic": "^1.2.5",
|
| 1134 |
+
"object-inspect": "^1.13.3"
|
| 1135 |
+
},
|
| 1136 |
+
"engines": {
|
| 1137 |
+
"node": ">= 0.4"
|
| 1138 |
+
},
|
| 1139 |
+
"funding": {
|
| 1140 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 1141 |
+
}
|
| 1142 |
+
},
|
| 1143 |
+
"node_modules/side-channel-weakmap": {
|
| 1144 |
+
"version": "1.0.2",
|
| 1145 |
+
"resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
|
| 1146 |
+
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
|
| 1147 |
+
"license": "MIT",
|
| 1148 |
+
"dependencies": {
|
| 1149 |
+
"call-bound": "^1.0.2",
|
| 1150 |
+
"es-errors": "^1.3.0",
|
| 1151 |
+
"get-intrinsic": "^1.2.5",
|
| 1152 |
+
"object-inspect": "^1.13.3",
|
| 1153 |
+
"side-channel-map": "^1.0.1"
|
| 1154 |
+
},
|
| 1155 |
+
"engines": {
|
| 1156 |
+
"node": ">= 0.4"
|
| 1157 |
+
},
|
| 1158 |
+
"funding": {
|
| 1159 |
+
"url": "https://github.com/sponsors/ljharb"
|
| 1160 |
+
}
|
| 1161 |
+
},
|
| 1162 |
+
"node_modules/statuses": {
|
| 1163 |
+
"version": "2.0.2",
|
| 1164 |
+
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
|
| 1165 |
+
"integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
|
| 1166 |
+
"license": "MIT",
|
| 1167 |
+
"engines": {
|
| 1168 |
+
"node": ">= 0.8"
|
| 1169 |
+
}
|
| 1170 |
+
},
|
| 1171 |
+
"node_modules/toidentifier": {
|
| 1172 |
+
"version": "1.0.1",
|
| 1173 |
+
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
|
| 1174 |
+
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
|
| 1175 |
+
"license": "MIT",
|
| 1176 |
+
"engines": {
|
| 1177 |
+
"node": ">=0.6"
|
| 1178 |
+
}
|
| 1179 |
+
},
|
| 1180 |
+
"node_modules/type-is": {
|
| 1181 |
+
"version": "2.0.1",
|
| 1182 |
+
"resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz",
|
| 1183 |
+
"integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==",
|
| 1184 |
+
"license": "MIT",
|
| 1185 |
+
"dependencies": {
|
| 1186 |
+
"content-type": "^1.0.5",
|
| 1187 |
+
"media-typer": "^1.1.0",
|
| 1188 |
+
"mime-types": "^3.0.0"
|
| 1189 |
+
},
|
| 1190 |
+
"engines": {
|
| 1191 |
+
"node": ">= 0.6"
|
| 1192 |
+
}
|
| 1193 |
+
},
|
| 1194 |
+
"node_modules/undici": {
|
| 1195 |
+
"version": "6.24.1",
|
| 1196 |
+
"resolved": "https://registry.npmjs.org/undici/-/undici-6.24.1.tgz",
|
| 1197 |
+
"integrity": "sha512-sC+b0tB1whOCzbtlx20fx3WgCXwkW627p4EA9uM+/tNNPkSS+eSEld6pAs9nDv7WbY1UUljBMYPtu9BCOrCWKA==",
|
| 1198 |
+
"license": "MIT",
|
| 1199 |
+
"engines": {
|
| 1200 |
+
"node": ">=18.17"
|
| 1201 |
+
}
|
| 1202 |
+
},
|
| 1203 |
+
"node_modules/undici-types": {
|
| 1204 |
+
"version": "7.18.2",
|
| 1205 |
+
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz",
|
| 1206 |
+
"integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==",
|
| 1207 |
+
"license": "MIT"
|
| 1208 |
+
},
|
| 1209 |
+
"node_modules/unpipe": {
|
| 1210 |
+
"version": "1.0.0",
|
| 1211 |
+
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
|
| 1212 |
+
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
|
| 1213 |
+
"license": "MIT",
|
| 1214 |
+
"engines": {
|
| 1215 |
+
"node": ">= 0.8"
|
| 1216 |
+
}
|
| 1217 |
+
},
|
| 1218 |
+
"node_modules/vary": {
|
| 1219 |
+
"version": "1.1.2",
|
| 1220 |
+
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
|
| 1221 |
+
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
|
| 1222 |
+
"license": "MIT",
|
| 1223 |
+
"engines": {
|
| 1224 |
+
"node": ">= 0.8"
|
| 1225 |
+
}
|
| 1226 |
+
},
|
| 1227 |
+
"node_modules/which": {
|
| 1228 |
+
"version": "2.0.2",
|
| 1229 |
+
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
|
| 1230 |
+
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
|
| 1231 |
+
"license": "ISC",
|
| 1232 |
+
"dependencies": {
|
| 1233 |
+
"isexe": "^2.0.0"
|
| 1234 |
+
},
|
| 1235 |
+
"bin": {
|
| 1236 |
+
"node-which": "bin/node-which"
|
| 1237 |
+
},
|
| 1238 |
+
"engines": {
|
| 1239 |
+
"node": ">= 8"
|
| 1240 |
+
}
|
| 1241 |
+
},
|
| 1242 |
+
"node_modules/wrappy": {
|
| 1243 |
+
"version": "1.0.2",
|
| 1244 |
+
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
|
| 1245 |
+
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
|
| 1246 |
+
"license": "ISC"
|
| 1247 |
+
},
|
| 1248 |
+
"node_modules/zod": {
|
| 1249 |
+
"version": "4.3.6",
|
| 1250 |
+
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
|
| 1251 |
+
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
|
| 1252 |
+
"license": "MIT",
|
| 1253 |
+
"peer": true,
|
| 1254 |
+
"funding": {
|
| 1255 |
+
"url": "https://github.com/sponsors/colinhacks"
|
| 1256 |
+
}
|
| 1257 |
+
},
|
| 1258 |
+
"node_modules/zod-to-json-schema": {
|
| 1259 |
+
"version": "3.25.1",
|
| 1260 |
+
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.1.tgz",
|
| 1261 |
+
"integrity": "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==",
|
| 1262 |
+
"license": "ISC",
|
| 1263 |
+
"peerDependencies": {
|
| 1264 |
+
"zod": "^3.25 || ^4"
|
| 1265 |
+
}
|
| 1266 |
+
}
|
| 1267 |
+
}
|
| 1268 |
+
}
|
package.json
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"dependencies": {
|
| 3 |
+
"@upstash/context7-mcp": "^2.1.4"
|
| 4 |
+
}
|
| 5 |
+
}
|
pytest.ini
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[pytest]
|
| 2 |
+
addopts = -p no:tmpdir -p no:cacheprovider --ignore=tests/.tmp
|
| 3 |
+
testpaths = tests
|
| 4 |
+
norecursedirs = .tmp tmp* pytest_tmp node_modules frontend tmp_everything_claude_code
|
requirements.txt
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# API
|
| 2 |
+
fastapi>=0.111.0
|
| 3 |
+
uvicorn[standard]>=0.29.0
|
| 4 |
+
python-multipart>=0.0.9
|
| 5 |
+
aiofiles>=23.2.1
|
| 6 |
+
httpx>=0.27.0
|
| 7 |
+
pydantic>=2.7.0
|
| 8 |
+
python-dotenv>=1.0.1
|
| 9 |
+
|
| 10 |
+
# ML - fingerprint
|
| 11 |
+
transformers>=4.40.0
|
| 12 |
+
timm>=1.0.0
|
| 13 |
+
torch>=2.1.0
|
| 14 |
+
torchvision>=0.16.0
|
| 15 |
+
|
| 16 |
+
# ML - coherence
|
| 17 |
+
# facenet-pytorch currently has limited support on newer Python versions.
|
| 18 |
+
facenet-pytorch>=2.5.3; python_version < "3.13"
|
| 19 |
+
mediapipe>=0.10.14
|
| 20 |
+
opencv-python-headless>=4.9.0
|
| 21 |
+
|
| 22 |
+
# ML - sstgnn
|
| 23 |
+
torch-geometric>=2.5.0
|
| 24 |
+
scipy>=1.13.0
|
| 25 |
+
|
| 26 |
+
# Explainability - Gemini
|
| 27 |
+
google-generativeai>=0.8.0
|
| 28 |
+
|
| 29 |
+
# HuggingFace
|
| 30 |
+
huggingface-hub>=0.23.0
|
| 31 |
+
|
| 32 |
+
# RunPod serverless handler
|
| 33 |
+
runpod>=1.6.0
|
| 34 |
+
|
| 35 |
+
# Continual learning
|
| 36 |
+
apscheduler>=3.10.4
|
| 37 |
+
|
| 38 |
+
# Utils
|
| 39 |
+
Pillow>=10.3.0
|
| 40 |
+
numpy>=1.26.0; python_version < "3.13"
|
| 41 |
+
numpy>=2.0.0; python_version >= "3.13"
|
| 42 |
+
scikit-learn>=1.5.0
|
runpod_handler.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import base64
|
| 4 |
+
import io
|
| 5 |
+
import os
|
| 6 |
+
import tempfile
|
| 7 |
+
import time
|
| 8 |
+
|
| 9 |
+
from PIL import Image
|
| 10 |
+
|
| 11 |
+
os.environ.setdefault("MODEL_CACHE_DIR", "/tmp/models")
|
| 12 |
+
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
|
| 13 |
+
|
| 14 |
+
from src.engines.coherence.engine import CoherenceEngine
|
| 15 |
+
from src.engines.fingerprint.engine import FingerprintEngine
|
| 16 |
+
from src.engines.sstgnn.engine import SSTGNNEngine
|
| 17 |
+
from src.explainability.explainer import explain
|
| 18 |
+
from src.fusion.fuser import fuse
|
| 19 |
+
|
| 20 |
+
_fp = FingerprintEngine()
|
| 21 |
+
_co = CoherenceEngine()
|
| 22 |
+
_st = SSTGNNEngine()
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _extract_frames(video_path: str) -> list:
|
| 26 |
+
try:
|
| 27 |
+
import cv2
|
| 28 |
+
except Exception:
|
| 29 |
+
return []
|
| 30 |
+
|
| 31 |
+
cap = cv2.VideoCapture(video_path)
|
| 32 |
+
frames = []
|
| 33 |
+
index = 0
|
| 34 |
+
while True:
|
| 35 |
+
ret, frame = cap.read()
|
| 36 |
+
if not ret:
|
| 37 |
+
break
|
| 38 |
+
if index % 4 == 0:
|
| 39 |
+
frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
|
| 40 |
+
index += 1
|
| 41 |
+
if len(frames) >= 300:
|
| 42 |
+
break
|
| 43 |
+
cap.release()
|
| 44 |
+
return frames
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def handler(job: dict) -> dict:
    """RunPod serverless entrypoint: run all three engines on one media payload.

    Expects job["input"] with a base64 payload under "data" (or legacy
    "image_b64") and an optional "media_type" of "image" (default) or video.
    Returns the fused verdict plus per-engine breakdown.
    """
    payload = job.get("input", {})
    b64_data = payload.get("data") or payload.get("image_b64")
    if not b64_data:
        raise ValueError("Missing input.data (base64 payload)")

    media_bytes = base64.b64decode(b64_data)
    kind = str(payload.get("media_type", "image")).lower()

    started = time.perf_counter()

    if kind == "image":
        img = Image.open(io.BytesIO(media_bytes)).convert("RGB")
        fp = _fp.run(img)
        co = _co.run(img)
        st = _st.run(img)
        verdict, conf, generator = fuse([fp, co, st], is_video=False)
    else:
        # Anything that is not "image" is treated as video. Spool the bytes
        # to a temp file so OpenCV can decode it, then always clean up.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp:
            temp.write(media_bytes)
            tmp_path = temp.name

        try:
            frames = _extract_frames(tmp_path)
        finally:
            os.unlink(tmp_path)

        fp = _fp.run_video(frames)
        co = _co.run_video(frames)
        st = _st.run_video(frames)
        verdict, conf, generator = fuse([fp, co, st], is_video=True)

    engine_results = [fp, co, st]
    explanation = explain(verdict, conf, engine_results, generator)
    elapsed_ms = (time.perf_counter() - started) * 1000

    return {
        "verdict": verdict,
        "confidence": conf,
        "attributed_generator": generator,
        "explanation": explanation,
        "processing_time_ms": elapsed_ms,
        "engine_breakdown": [result.model_dump() for result in engine_results],
    }
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# runpod is only installed in the serverless deployment image; importing it
# lazily lets this module be imported elsewhere (e.g. tests) without the SDK.
try:
    import runpod  # type: ignore
except Exception:
    runpod = None


# Start the serverless worker loop only when the SDK is actually present.
if runpod is not None:
    runpod.serverless.start({"handler": handler})
|
scripts/__init__.py
ADDED
|
File without changes
|
scripts/assemble_image_baseline.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/assemble_image_baseline.py
|
| 3 |
+
|
| 4 |
+
Assembles the image baseline dataset manifest from multiple prepared source
|
| 5 |
+
directories. Delegates to build_dataset.py with the correct Kaggle input paths.
|
| 6 |
+
|
| 7 |
+
Kaggle usage:
|
| 8 |
+
!python scripts/assemble_image_baseline.py \
|
| 9 |
+
--faces_140k /kaggle/input/140k-real-and-fake-faces \
|
| 10 |
+
--ai_vs_real /kaggle/input/ai-generated-vs-real-images-datasaet \
|
| 11 |
+
--output_dir /kaggle/working/processed/fingerprint
|
| 12 |
+
"""
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import sys
|
| 17 |
+
from pathlib import Path
|
| 18 |
+
|
| 19 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 20 |
+
from training.phase1_fingerprint.build_dataset import build, parse_args as _parse
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def main() -> None:
    """Parse Kaggle input-path arguments and delegate to the dataset builder."""
    parser = argparse.ArgumentParser(description="Assemble image baseline dataset (Kaggle)")
    parser.add_argument("--faces_140k", default=None)
    parser.add_argument("--ai_vs_real", default=None)
    parser.add_argument("--deepfake_real", default=None)
    parser.add_argument("--deepfake_faces", default=None)
    parser.add_argument("--celebdf", default=None)
    parser.add_argument("--output_dir", default="/kaggle/working/processed/fingerprint")
    parser.add_argument("--seed", type=int, default=42)
    build(parser.parse_args())
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if __name__ == "__main__":
|
| 37 |
+
main()
|
scripts/bootstrap_kaggle_dataset.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/bootstrap_kaggle_dataset.py
|
| 3 |
+
|
| 4 |
+
Uploads processed dataset to Kaggle as a private dataset so it can be
|
| 5 |
+
reused across notebook sessions without re-running preprocessing.
|
| 6 |
+
|
| 7 |
+
Usage (run locally after downloading processed data from Kaggle):
|
| 8 |
+
python scripts/bootstrap_kaggle_dataset.py \
|
| 9 |
+
--processed_dir /path/to/processed/fingerprint \
|
| 10 |
+
--dataset_name deepdetect-fingerprint-processed \
|
| 11 |
+
--title "DeepDetect Fingerprint Processed Dataset"
|
| 12 |
+
"""
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import json
|
| 17 |
+
import os
|
| 18 |
+
import subprocess
|
| 19 |
+
import sys
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def main(args: argparse.Namespace) -> None:
    """Upload a processed dataset directory to Kaggle as a private dataset.

    Writes a temporary dataset-metadata.json into the directory, invokes the
    Kaggle CLI, then removes the metadata file. Exits with status 1 when the
    username or directory is missing, or when the `kaggle` CLI is absent.
    """
    processed_dir = Path(args.processed_dir)
    dataset_name = args.dataset_name
    kaggle_username = args.username or os.environ.get("KAGGLE_USERNAME")

    if not kaggle_username:
        print("ERROR: Kaggle username required. Set --username or KAGGLE_USERNAME env var.")
        sys.exit(1)

    if not processed_dir.exists():
        print(f"ERROR: Directory not found: {processed_dir}")
        sys.exit(1)

    # Write dataset-metadata.json (required by the Kaggle CLI).
    meta = {
        "title": args.title,
        "id": f"{kaggle_username}/{dataset_name}",
        "licenses": [{"name": "CC0-1.0"}],
    }
    meta_path = processed_dir / "dataset-metadata.json"
    with open(meta_path, "w") as f:
        json.dump(meta, f, indent=2)

    print(f"Creating Kaggle dataset: {kaggle_username}/{dataset_name}")
    try:
        result = subprocess.run(
            ["kaggle", "datasets", "create", "-p", str(processed_dir), "--dir-mode", "zip"],
            capture_output=True, text=True,
        )
    except FileNotFoundError:
        # Previously a missing CLI raised out of main and leaked the metadata file.
        print("ERROR: 'kaggle' CLI not found. Install it with 'pip install kaggle'.")
        sys.exit(1)
    finally:
        # Always clean up the temporary metadata file, even on failure.
        meta_path.unlink(missing_ok=True)

    if result.returncode == 0:
        print(f"Dataset created: https://www.kaggle.com/datasets/{kaggle_username}/{dataset_name}")
    else:
        print(f"STDOUT: {result.stdout}")
        print(f"STDERR: {result.stderr}")
        print("If dataset already exists, use 'kaggle datasets version' to update it.")
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def parse_args():
    """Build and parse the CLI arguments for the Kaggle upload helper."""
    parser = argparse.ArgumentParser(description="Upload processed dataset to Kaggle")
    parser.add_argument("--processed_dir", required=True)
    parser.add_argument("--dataset_name", required=True)
    parser.add_argument("--title", default="DeepDetect Processed Dataset")
    parser.add_argument("--username", default=None)
    return parser.parse_args()
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
if __name__ == "__main__":
|
| 72 |
+
main(parse_args())
|
scripts/demo_smoke_test.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/demo_smoke_test.py
|
| 3 |
+
|
| 4 |
+
Prepare demo sample media and run one-command smoke tests against the running API.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
python scripts/demo_smoke_test.py
|
| 8 |
+
python scripts/demo_smoke_test.py --base-url http://localhost:8000
|
| 9 |
+
|
| 10 |
+
Environment:
|
| 11 |
+
DEMO_API_BASE_URL overrides default base URL.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import argparse
|
| 17 |
+
import logging
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
from typing import Any, cast
|
| 22 |
+
|
| 23 |
+
import httpx
|
| 24 |
+
import numpy as np
|
| 25 |
+
from PIL import Image, ImageDraw
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
LOG_FORMAT = "%(levelname)s: %(message)s"
|
| 29 |
+
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
|
| 30 |
+
log = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Where demo media (images/video) is written and reused between runs.
SAMPLE_DIR = Path("data/samples")
# Keys every detection response must contain; checked in _validate_response.
REQUIRED_RESPONSE_KEYS = {
    "verdict",
    "confidence",
    "attributed_generator",
    "explanation",
    "processing_time_ms",
    "engine_breakdown",
}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _download_file(url: str, output: Path) -> bool:
    """Fetch *url* (following redirects) into *output*; True on success."""
    try:
        with httpx.Client(timeout=20.0, follow_redirects=True) as client:
            response = client.get(url)
            response.raise_for_status()
            output.write_bytes(response.content)
            return True
    except Exception as exc:
        # Best-effort download: callers fall back to generated samples.
        log.warning("Could not download %s (%s)", url, exc)
        return False
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _make_fallback_fake_image(fake_img: Path) -> None:
    """Write a deterministic synthetic-looking JPEG sample to *fake_img*."""
    canvas = Image.new("RGB", (512, 512), (12, 12, 12))
    pen = ImageDraw.Draw(canvas)
    # Vertical stripes whose colors cycle with horizontal position.
    for x in range(0, 512, 16):
        stripe = (40 + (x % 120), 40, 120 + (x % 80))
        pen.rectangle([x, 0, x + 8, 511], fill=stripe)
    pen.ellipse([140, 110, 380, 350], outline=(220, 220, 220), width=4)
    pen.text((168, 380), "SYNTHETIC SAMPLE", fill=(220, 220, 220))
    canvas.save(fake_img, format="JPEG", quality=95)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _make_fallback_real_image(real_img: Path) -> None:
    """Write a natural-looking gradient JPEG used when no real photo downloads."""
    pixels = np.zeros((512, 512, 3), dtype=np.uint8)
    # Smooth top-to-bottom gradient with a different falloff per channel.
    for row in range(512):
        pixels[row, :, 0] = np.uint8(200 - row // 4)
        pixels[row, :, 1] = np.uint8(160 - row // 6)
        pixels[row, :, 2] = np.uint8(140 - row // 8)
    photo = Image.fromarray(pixels, mode="RGB")
    pen = ImageDraw.Draw(photo)
    pen.rectangle([56, 280, 456, 508], outline=(245, 245, 245), width=2)
    pen.text((70, 468), "REAL-LIKE SAMPLE", fill=(245, 245, 245))
    photo.save(real_img, format="JPEG", quality=95)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _prepare_images(
    sample_dir: Path,
    refresh: bool = False,
    use_web_samples: bool = False,
) -> tuple[Path, Path]:
    """Ensure one fake and one real demo image exist; return their paths.

    Existing files are reused unless *refresh* is set. A web download for the
    fake sample is attempted only when *use_web_samples* is True; generated
    fallbacks are used whenever a download is skipped or fails.
    """
    sample_dir.mkdir(parents=True, exist_ok=True)
    fake_path = sample_dir / "fake_1.jpg"
    real_path = sample_dir / "real_1.jpg"

    if not refresh and fake_path.exists() and real_path.exists():
        return fake_path, real_path

    downloaded_fake = use_web_samples and _download_file(
        "https://thispersondoesnotexist.com", fake_path
    )
    if not downloaded_fake:
        _make_fallback_fake_image(fake_path)

    downloaded_real = _download_file(
        "https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?w=512",
        real_path,
    )
    if not downloaded_real:
        log.info("Could not download real sample image; using generated fallback real image.")
        _make_fallback_real_image(real_path)

    return fake_path, real_path
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _prepare_video(sample_dir: Path) -> Path:
    """Generate (or reuse) a short synthetic AVI demo clip and return its path.

    Writes 40 frames at 10 fps with an animated gradient background and a
    moving face-like drawing, trying the XVID codec first and falling back
    to MJPG when the writer cannot be opened.
    """
    import cv2

    sample_dir.mkdir(parents=True, exist_ok=True)
    video_path = sample_dir / "demo_1.avi"
    # Reuse an existing non-empty clip so repeated runs are fast.
    if video_path.exists() and video_path.stat().st_size > 0:
        return video_path

    width, height = 512, 512
    fps = 10.0
    frames = 40

    # VideoWriter_fourcc is looked up dynamically because some OpenCV builds
    # expose it differently; cast keeps static type checkers quiet.
    fourcc_fn = cast(Any, getattr(cv2, "VideoWriter_fourcc", None))
    if not callable(fourcc_fn):
        raise RuntimeError("OpenCV VideoWriter_fourcc is unavailable")
    fourcc_xvid = cast(int, fourcc_fn(*"XVID"))
    fourcc_mjpg = cast(int, fourcc_fn(*"MJPG"))

    # Codec fallback: XVID first, then MJPG — order matters for portability.
    writer = cv2.VideoWriter(
        str(video_path), fourcc_xvid, fps, (width, height)
    )
    if not writer.isOpened():
        writer = cv2.VideoWriter(
            str(video_path), fourcc_mjpg, fps, (width, height)
        )

    if not writer.isOpened():
        raise RuntimeError("Could not initialize AVI video writer")

    for i in range(frames):
        frame = np.zeros((height, width, 3), dtype=np.uint8)
        # Background gradient animation
        frame[:, :, 0] = np.uint8((40 + i * 3) % 255)
        frame[:, :, 1] = np.uint8((20 + i * 2) % 255)
        frame[:, :, 2] = np.uint8((60 + i * 4) % 255)

        # Moving face-like circle + eyes/mouth
        cx = int(120 + (i * 8) % 280)
        cy = 220
        cv2.circle(frame, (cx, cy), 70, (220, 220, 220), 2)
        cv2.circle(frame, (cx - 24, cy - 12), 6, (230, 230, 230), -1)
        cv2.circle(frame, (cx + 24, cy - 12), 6, (230, 230, 230), -1)
        cv2.ellipse(frame, (cx, cy + 20), (24, 12), 0, 0, 180, (230, 230, 230), 2)

        cv2.putText(
            frame,
            "DEMO VIDEO SAMPLE",
            (110, 470),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.8,
            (245, 245, 245),
            2,
            cv2.LINE_AA,
        )
        writer.write(frame)

    writer.release()

    # Sanity-check the writer actually produced output on disk.
    if not video_path.exists() or video_path.stat().st_size == 0:
        raise RuntimeError("Video file was not written")

    return video_path
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def _validate_response(payload: dict) -> None:
    """Validate a detection API response, raising ValueError on any defect.

    Checks that all REQUIRED_RESPONSE_KEYS are present, the verdict is
    FAKE/REAL, and confidence is a real number within [0, 1].
    """
    missing = REQUIRED_RESPONSE_KEYS - set(payload.keys())
    if missing:
        raise ValueError(f"Response missing keys: {sorted(missing)}")

    verdict = payload.get("verdict")
    confidence = payload.get("confidence")

    if verdict not in {"FAKE", "REAL"}:
        raise ValueError(f"Invalid verdict: {verdict}")
    # bool subclasses int, so confidence=True previously slipped through the
    # numeric check; reject booleans explicitly.
    if isinstance(confidence, bool) or not isinstance(confidence, (int, float)):
        raise ValueError("confidence is not numeric")
    if confidence < 0.0 or confidence > 1.0:
        raise ValueError(f"confidence out of range: {confidence}")
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _post_file(base_url: str, endpoint: str, path: Path, content_type: str) -> dict:
    """POST *path* as a multipart upload and return the validated JSON body."""
    with httpx.Client(timeout=60.0) as client:
        with path.open("rb") as handle:
            upload = {"file": (path.name, handle, content_type)}
            resp = client.post(f"{base_url}{endpoint}", files=upload)

    if resp.status_code != 200:
        raise RuntimeError(
            f"{endpoint} returned {resp.status_code}: {resp.text[:300]}"
        )

    body = resp.json()
    _validate_response(body)
    return body
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _check_health(base_url: str) -> None:
    """Hit /health; raises on connection failure or non-2xx status."""
    with httpx.Client(timeout=15.0) as client:
        client.get(f"{base_url}/health").raise_for_status()
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def run_smoke(
    base_url: str,
    refresh_samples: bool = False,
    use_web_samples: bool = False,
) -> int:
    """Run the full smoke test: health check, two image detections, one video.

    Returns 0 on success; any failure raises and is reported by the caller.
    """
    base_url = base_url.rstrip("/")
    log.info("Using API base URL: %s", base_url)

    _check_health(base_url)
    log.info("Health check OK")

    fake_path, real_path = _prepare_images(
        SAMPLE_DIR,
        refresh=refresh_samples,
        use_web_samples=use_web_samples,
    )
    video_path = _prepare_video(SAMPLE_DIR)

    fake_result = _post_file(base_url, "/detect/image", fake_path, "image/jpeg")
    real_result = _post_file(base_url, "/detect/image", real_path, "image/jpeg")
    video_result = _post_file(base_url, "/detect/video", video_path, "video/x-msvideo")

    log.info(
        "Image test 1 (%s): verdict=%s confidence=%.3f generator=%s",
        fake_path.name,
        fake_result["verdict"],
        float(fake_result["confidence"]),
        fake_result["attributed_generator"],
    )
    log.info(
        "Image test 2 (%s): verdict=%s confidence=%.3f generator=%s",
        real_path.name,
        real_result["verdict"],
        float(real_result["confidence"]),
        real_result["attributed_generator"],
    )
    log.info(
        "Video test (%s): verdict=%s confidence=%.3f generator=%s",
        video_path.name,
        video_result["verdict"],
        float(video_result["confidence"]),
        video_result["attributed_generator"],
    )

    log.info("Smoke test PASSED")
    return 0
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def parse_args() -> argparse.Namespace:
    """Parse smoke-test CLI flags (API base URL plus sample-handling switches)."""
    cli = argparse.ArgumentParser(
        description="Prepare demo media and smoke-test image/video endpoints"
    )
    # Default can be overridden via the DEMO_API_BASE_URL environment variable.
    cli.add_argument(
        "--base-url",
        default=os.environ.get("DEMO_API_BASE_URL", "http://localhost:8000"),
        help="API base URL (default: %(default)s)",
    )
    cli.add_argument(
        "--refresh-samples",
        action="store_true",
        help="Re-download/regenerate demo samples instead of reusing existing files",
    )
    cli.add_argument(
        "--use-web-samples",
        action="store_true",
        help="Use downloaded web sample for the fake image (less deterministic)",
    )
    return cli.parse_args()
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def main() -> int:
    """CLI entrypoint: run the smoke test, mapping any exception to exit 1."""
    opts = parse_args()
    try:
        return run_smoke(
            opts.base_url,
            refresh_samples=opts.refresh_samples,
            use_web_samples=opts.use_web_samples,
        )
    except Exception as exc:
        log.error("Smoke test FAILED: %s", exc)
        return 1
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
if __name__ == "__main__":
|
| 298 |
+
raise SystemExit(main())
|
scripts/kaggle_train_image_baseline.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/kaggle_train_image_baseline.py
|
| 3 |
+
|
| 4 |
+
Self-contained Kaggle training script.
|
| 5 |
+
Paste directly into a Kaggle notebook code cell and run.
|
| 6 |
+
No separate data prep step needed — builds the dataset inline from Kaggle input mounts.
|
| 7 |
+
|
| 8 |
+
Datasets needed (add via Notebook → Add Data):
|
| 9 |
+
- xhlulu/140k-real-and-fake-faces
|
| 10 |
+
- philosopher0/ai-generated-vs-real-images-datasaet
|
| 11 |
+
|
| 12 |
+
Accelerator: P100 (16GB VRAM)
|
| 13 |
+
"""
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import os, sys, logging, random, time, csv, shutil, hashlib
|
| 17 |
+
from pathlib import Path
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
from torch.cuda.amp import GradScaler, autocast
|
| 23 |
+
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
|
| 24 |
+
from torchvision import transforms
|
| 25 |
+
from PIL import Image
|
| 26 |
+
from sklearn.metrics import roc_auc_score, accuracy_score
|
| 27 |
+
from tqdm import tqdm
|
| 28 |
+
|
| 29 |
+
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
| 30 |
+
log = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
# ── Configuration — edit these ────────────────────────────────────────────────
|
| 33 |
+
|
| 34 |
+
CFG = {
|
| 35 |
+
"faces_140k": "/kaggle/input/140k-real-and-fake-faces",
|
| 36 |
+
"ai_vs_real": "/kaggle/input/ai-generated-vs-real-images-datasaet",
|
| 37 |
+
"processed": "/kaggle/working/processed/fingerprint",
|
| 38 |
+
"output": "/kaggle/working/checkpoints/fingerprint",
|
| 39 |
+
"epochs": 5, # 5 for demo (~55 min), 30 for full run (~6h)
|
| 40 |
+
"batch_size": 64,
|
| 41 |
+
"lr": 2e-5,
|
| 42 |
+
"amp": True,
|
| 43 |
+
"seed": 42,
|
| 44 |
+
"workers": 2,
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
torch.manual_seed(CFG["seed"])
|
| 48 |
+
np.random.seed(CFG["seed"])
|
| 49 |
+
random.seed(CFG["seed"])
|
| 50 |
+
|
| 51 |
+
Path(CFG["processed"]).mkdir(parents=True, exist_ok=True)
|
| 52 |
+
Path(CFG["output"]).mkdir(parents=True, exist_ok=True)
|
| 53 |
+
|
| 54 |
+
# ── GPU check ────────────────────────────────────────────────────────────────
|
| 55 |
+
|
| 56 |
+
print(f"PyTorch {torch.__version__} | CUDA {torch.cuda.is_available()}")
|
| 57 |
+
if torch.cuda.is_available():
|
| 58 |
+
print(f"GPU: {torch.cuda.get_device_name(0)}")
|
| 59 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 60 |
+
|
| 61 |
+
# ── Dataset build ─────────────────────────────────────────────────────────────
|
| 62 |
+
|
| 63 |
+
IMG_EXTS = {".jpg", ".jpeg", ".png", ".webp"}
|
| 64 |
+
|
| 65 |
+
def collect_images(root, label_str):
    """Recursively gather (path, label) pairs for every image file under *root*."""
    return [
        (path, label_str)
        for path in Path(root).rglob("*")
        if path.suffix.lower() in IMG_EXTS
    ]
|
| 71 |
+
|
| 72 |
+
def build_dataset():
    """Assemble train/val/test image folders under CFG["processed"].

    Collects (path, label) records from the two Kaggle input mounts, shuffles
    with the seeded global RNG, splits 80/10/10, and copies files into
    <processed>/<split>/<label>/ with an md5 prefix used for de-duplication.
    """
    log.info("Building fingerprint dataset ...")
    all_records = []

    # 140k faces: has train/real and train/fake folders
    if Path(CFG["faces_140k"]).exists():
        for split in ["train", "valid", ""]:
            for label in ["real", "fake"]:
                d = Path(CFG["faces_140k"]) / split / label if split else Path(CFG["faces_140k"]) / label
                if d.exists():
                    imgs = collect_images(d, label)
                    all_records.extend(imgs)
                    log.info(f" 140k/{split or 'root'}/{label}: {len(imgs)}")

    # AI vs real: subfolder name = generator
    if Path(CFG["ai_vs_real"]).exists():
        for sub in Path(CFG["ai_vs_real"]).iterdir():
            if not sub.is_dir(): continue
            label = "real" if "real" in sub.name.lower() else "fake"
            imgs = collect_images(sub, label)
            all_records.extend(imgs)
            log.info(f" ai_vs_real/{sub.name} → {label}: {len(imgs)}")

    # 80/10/10 split of the shuffled pool; test gets the remainder.
    random.shuffle(all_records)
    n = len(all_records)
    n_train = int(n * 0.80)
    n_val = int(n * 0.10)
    splits = {
        "train": all_records[:n_train],
        "val": all_records[n_train:n_train+n_val],
        "test": all_records[n_train+n_val:],
    }

    # Content-hash de-dup: identical files appearing in multiple sources are
    # copied only once (the first split encountered keeps the file).
    seen = set()
    for split, records in splits.items():
        for src, label in tqdm(records, desc=f"Copying {split}"):
            h = hashlib.md5(src.read_bytes()).hexdigest()[:8]
            dst = Path(CFG["processed"]) / split / label / f"{h}_{src.name}"
            if h not in seen and not dst.exists():
                dst.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(src, dst)
                seen.add(h)

    # Report final per-split/per-label counts for sanity checking.
    for split in ["train", "val"]:
        for label in ["real", "fake"]:
            count = len(list((Path(CFG["processed"]) / split / label).glob("*")))
            log.info(f" {split}/{label}: {count}")

# Build the dataset eagerly at import/run time (this is a notebook script).
build_dataset()
|
| 121 |
+
|
| 122 |
+
# ── Transforms ────────────────────────────────────────────────────────────────
|
| 123 |
+
|
| 124 |
+
MEAN, STD = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
|
| 125 |
+
TRAIN_TF = transforms.Compose([
|
| 126 |
+
transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
|
| 127 |
+
transforms.RandomHorizontalFlip(),
|
| 128 |
+
transforms.ColorJitter(0.2, 0.2, 0.2, 0.1),
|
| 129 |
+
transforms.ToTensor(),
|
| 130 |
+
transforms.Normalize(MEAN, STD),
|
| 131 |
+
])
|
| 132 |
+
VAL_TF = transforms.Compose([
|
| 133 |
+
transforms.Resize(256), transforms.CenterCrop(224),
|
| 134 |
+
transforms.ToTensor(), transforms.Normalize(MEAN, STD),
|
| 135 |
+
])
|
| 136 |
+
|
| 137 |
+
# ── Dataset class ──────────────────────────────────────────────────────────────
|
| 138 |
+
|
| 139 |
+
class ImgDataset(Dataset):
    """Binary real/fake image dataset over <root>/<split>/{real,fake}/ folders.

    Labels: 0 = real, 1 = fake. Images are loaded lazily in __getitem__.
    """

    def __init__(self, root, split, transform):
        self.transform = transform
        self.samples = []
        for label, is_fake in [("real", 0), ("fake", 1)]:
            folder = Path(root) / split / label
            if not folder.exists():
                continue
            for entry in folder.iterdir():
                if entry.suffix.lower() in IMG_EXTS:
                    self.samples.append((entry, is_fake))
        log.info(f"Dataset [{split}]: {len(self.samples)} images")

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, i):
        path, target = self.samples[i]
        image = Image.open(path).convert("RGB")
        return self.transform(image), target
|
| 156 |
+
|
| 157 |
+
# Build datasets and a class-balanced training loader.
train_ds = ImgDataset(CFG["processed"], "train", TRAIN_TF)
val_ds = ImgDataset(CFG["processed"], "val", VAL_TF)
labels = [s[1] for s in train_ds.samples]
n_real = labels.count(0); n_fake = labels.count(1)
# Inverse-frequency weights so the sampler draws real/fake roughly 50/50
# per batch even when the on-disk classes are imbalanced.
weights = [1.0/n_real if l==0 else 1.0/n_fake for l in labels]
sampler = WeightedRandomSampler(weights, len(weights), replacement=True)
train_dl = DataLoader(train_ds, batch_size=CFG["batch_size"], sampler=sampler,
                      num_workers=CFG["workers"], pin_memory=True)
# Validation uses a doubled batch size: no gradients are stored in eval.
val_dl = DataLoader(val_ds, batch_size=CFG["batch_size"]*2, shuffle=False,
                    num_workers=CFG["workers"], pin_memory=True)
|
| 167 |
+
|
| 168 |
+
# ── Model ─────────────────────────────────────────────────────────────────────
|
| 169 |
+
|
| 170 |
+
import timm


class FingerprintModel(nn.Module):
    """ViT-B/16 backbone with two MLP heads.

    ``binary_head`` scores real vs. fake (2 logits); ``generator_head``
    attributes a fake to one of 8 generator families. The backbone is the
    pretrained token-pooled ViT with its classifier removed.
    """

    def __init__(self):
        super().__init__()
        self.backbone = timm.create_model(
            "vit_base_patch16_224", pretrained=True,
            num_classes=0, global_pool="token",
        )
        feat_dim = self.backbone.num_features
        self.binary_head = self._build_head(feat_dim, 2)
        self.generator_head = self._build_head(feat_dim, 8)

    @staticmethod
    def _build_head(in_dim, out_dim):
        # Two-layer MLP: Linear -> GELU -> Dropout(0.1) -> Linear.
        return nn.Sequential(
            nn.Linear(in_dim, 384),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(384, out_dim),
        )

    def forward(self, x):
        feats = self.backbone(x)
        return {
            "binary_logits": self.binary_head(feats),
            "generator_logits": self.generator_head(feats),
        }
|
| 183 |
+
|
| 184 |
+
model = FingerprintModel().to(device)
# Class weights mirror the sampler's inverse-frequency balancing; label
# smoothing softens over-confident predictions.
cw = torch.tensor([1.0/n_real, 1.0/n_fake], dtype=torch.float32).to(device)
criterion = nn.CrossEntropyLoss(weight=cw, label_smoothing=0.05)
optimizer = torch.optim.AdamW(model.parameters(), lr=CFG["lr"], weight_decay=0.01)
# AMP gradient scaler; becomes a pass-through when CFG["amp"] is False.
scaler = GradScaler(enabled=CFG["amp"])
|
| 189 |
+
|
| 190 |
+
# ── Training loop ─────────────────────────────────────────────────────────────
|
| 191 |
+
|
| 192 |
+
best_auc = 0.0
for epoch in range(CFG["epochs"]):
    model.train()
    t0 = time.time()
    for imgs, labels_b in tqdm(train_dl, desc=f"Train E{epoch+1}", leave=False):
        imgs, labels_b = imgs.to(device), labels_b.to(device)
        with autocast(enabled=CFG["amp"]):
            out = model(imgs)
            # Only the binary head receives a loss here; the generator head
            # is present in the forward output but not trained in this loop.
            loss = criterion(out["binary_logits"], labels_b)
        scaler.scale(loss).backward()
        # Unscale before clipping so the norm threshold applies to the true
        # (unscaled) gradients, then step/update/zero in the usual AMP order.
        scaler.unscale_(optimizer)
        nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        scaler.step(optimizer); scaler.update(); optimizer.zero_grad()

    # Validate
    model.eval()
    all_probs, all_labels = [], []
    with torch.no_grad():
        for imgs, labels_b in tqdm(val_dl, desc="Val", leave=False):
            imgs = imgs.to(device)
            out = model(imgs)
            # P(fake) = softmax over the two binary logits, class index 1.
            probs = torch.softmax(out["binary_logits"], 1)[:, 1].cpu().numpy()
            all_probs.extend(probs); all_labels.extend(labels_b.numpy())

    # AUC is undefined for a single-class validation set; fall back to 0.5.
    auc = roc_auc_score(all_labels, all_probs) if len(set(all_labels)) > 1 else 0.5
    acc = accuracy_score(all_labels, (np.array(all_probs) > 0.5).astype(int))
    elapsed = time.time() - t0
    log.info(f"Epoch {epoch+1}/{CFG['epochs']} | val_auc={auc:.4f} | val_acc={acc:.4f} | {elapsed:.0f}s")

    # Keep the best-by-AUC checkpoint separately from the rolling "latest".
    if auc > best_auc:
        best_auc = auc
        ckpt = {
            "epoch": epoch, "model_state": model.state_dict(),
            "val_auc": auc, "config": CFG,
        }
        torch.save(ckpt, Path(CFG["output"]) / "best.pt")
        log.info(f" Saved best.pt (AUC={best_auc:.4f})")

    # latest.pt is overwritten every epoch to allow resume-after-interrupt.
    torch.save({"epoch": epoch, "model_state": model.state_dict(),
                "val_auc": auc, "config": CFG},
               Path(CFG["output"]) / "latest.pt")

print(f"\nTraining complete. Best val AUC: {best_auc:.4f}")
# NOTE(review): hardcoded path — presumably equals CFG["output"]/best.pt; verify.
print(f"Download: /kaggle/working/checkpoints/fingerprint/best.pt")
|
scripts/prepare_cifake.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/prepare_cifake.py
|
| 3 |
+
|
| 4 |
+
Prepares the CIFAKE dataset (Kaggle: bird-coder/cifake-real-and-ai-generated-synthetic-images)
|
| 5 |
+
for use in the fingerprint engine training pipeline.
|
| 6 |
+
|
| 7 |
+
CIFAKE contains 60k real images (CIFAR-10) and 60k AI-generated equivalents.
|
| 8 |
+
Useful as extra training data for the fingerprint engine.
|
| 9 |
+
|
| 10 |
+
Kaggle usage:
|
| 11 |
+
!python scripts/prepare_cifake.py \
|
| 12 |
+
--source /kaggle/input/cifake-real-and-ai-generated-synthetic-images \
|
| 13 |
+
--output /kaggle/working/processed/fingerprint \
|
| 14 |
+
--max_per_class 20000
|
| 15 |
+
"""
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import logging
|
| 20 |
+
import random
|
| 21 |
+
import shutil
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
# Module-wide logger; all progress/warning output goes through it.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
log = logging.getLogger(__name__)

# Lower-cased image extensions accepted when scanning source directories.
IMG_EXTS = {".jpg", ".jpeg", ".png"}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def main(args: argparse.Namespace) -> None:
    """Copy CIFAKE images into the processed ``{train,val}/{real,fake}`` layout.

    Supports CIFAKE's canonical ``<source>/{train,test}/{REAL,FAKE}`` layout
    and falls back to a flat ``<source>/{REAL,FAKE}`` layout.

    Args:
        args: Parsed CLI namespace (source, output, max_per_class, seed).
    """
    source = Path(args.source)
    output = Path(args.output)
    rng = random.Random(args.seed)

    if not source.exists():
        log.error(f"Source not found: {source}")
        return

    # Fix: in the flat layout, both the "train" and "test" iterations resolve
    # to the SAME directory, so the original code copied identical images into
    # both train/ and val/ (train/val leakage). Track resolved directories and
    # process each only once.
    processed_dirs = set()
    for split in ["train", "test"]:
        for label, is_fake in [("REAL", "real"), ("FAKE", "fake")]:
            src_dir = source / split / label
            if not src_dir.exists():
                src_dir = source / label
            if not src_dir.exists():
                log.warning(f" Not found: {src_dir}")
                continue
            if src_dir in processed_dirs:
                log.warning(f" Skipping {src_dir} (flat layout, already copied)")
                continue
            processed_dirs.add(src_dir)

            imgs = [p for p in src_dir.iterdir() if p.suffix.lower() in IMG_EXTS]
            rng.shuffle(imgs)
            imgs = imgs[:args.max_per_class]

            # CIFAKE's "test" split becomes our validation split.
            out_split = "train" if split == "train" else "val"
            dst_dir = output / out_split / is_fake
            dst_dir.mkdir(parents=True, exist_ok=True)

            for img in imgs:
                dst = dst_dir / f"cifake_{img.name}"
                if not dst.exists():  # idempotent re-runs
                    shutil.copy2(img, dst)

            log.info(f" cifake/{split}/{label} → {out_split}/{is_fake}: {len(imgs)} images")

    log.info("CIFAKE preparation complete.")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list; ``None`` falls back to
            ``sys.argv[1:]``. Accepting it makes the parser unit-testable.

    Returns:
        argparse.Namespace with source, output, max_per_class and seed.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--source", default="/kaggle/input/cifake-real-and-ai-generated-synthetic-images")
    p.add_argument("--output", default="/kaggle/working/processed/fingerprint")
    p.add_argument("--max_per_class", type=int, default=20000)
    p.add_argument("--seed", type=int, default=42)
    return p.parse_args(argv)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
|
scripts/prepare_cocoxgen.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/prepare_cocoxgen.py
|
| 3 |
+
|
| 4 |
+
Prepares COCO-XGen dataset for fingerprint engine training.
|
| 5 |
+
Maps COCO real images against XL-generated equivalents for generator attribution training.
|
| 6 |
+
|
| 7 |
+
Kaggle usage:
|
| 8 |
+
!python scripts/prepare_cocoxgen.py \
|
| 9 |
+
--real_source /kaggle/input/coco-2017-dataset/coco2017/val2017 \
|
| 10 |
+
--fake_source /kaggle/input/coco-xgen-synthetic \
|
| 11 |
+
--output /kaggle/working/processed/fingerprint \
|
| 12 |
+
--max 15000
|
| 13 |
+
"""
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import argparse
|
| 17 |
+
import logging
|
| 18 |
+
import random
|
| 19 |
+
import shutil
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
|
| 22 |
+
# Module-wide logger; all progress/warning output goes through it.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
log = logging.getLogger(__name__)

# Lower-cased image extensions accepted when scanning source directories.
IMG_EXTS = {".jpg", ".jpeg", ".png"}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def copy_subset(src_dir: Path, dst_dir: Path, max_n: int, prefix: str, rng: random.Random,
                exts: frozenset = frozenset({".jpg", ".jpeg", ".png"})) -> int:
    """Copy up to ``max_n`` randomly chosen images from ``src_dir`` to ``dst_dir``.

    Files are renamed ``<prefix>_<original name>``; destinations that already
    exist are left untouched, so re-runs are idempotent.

    Args:
        src_dir: Directory scanned recursively for image files.
        dst_dir: Destination directory (created if missing).
        max_n: Maximum number of files to select.
        prefix: Filename prefix identifying the dataset source.
        rng: Seeded RNG so the subset is reproducible.
        exts: Accepted lower-cased file extensions (generalized from the
            module-level IMG_EXTS constant; default is identical to it).

    Returns:
        Number of files selected (already-present copies are still counted).
    """
    dst_dir.mkdir(parents=True, exist_ok=True)
    imgs = [p for p in src_dir.rglob("*") if p.suffix.lower() in exts]
    rng.shuffle(imgs)
    imgs = imgs[:max_n]
    for img in imgs:
        dst = dst_dir / f"{prefix}_{img.name}"
        if not dst.exists():
            shutil.copy2(img, dst)
    return len(imgs)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def main(args: argparse.Namespace) -> None:
    """Assemble real (COCO) and fake (XGen) train/val folders.

    Fix: the original called copy_subset once per split, which reshuffled the
    full source listing each time and took a prefix — so the train and val
    subsets overlapped heavily (train/val leakage). Each source is now listed
    and shuffled exactly once, then sliced into disjoint train (90%) and
    val (10%) subsets.

    Args:
        args: Parsed CLI namespace (real_source, fake_source, output, max, seed).
    """
    rng = random.Random(args.seed)
    n_train = int(args.max * 0.9)
    n_val = int(args.max * 0.1)

    def _copy(paths, dst_dir, prefix):
        # Copy with a dataset prefix; skip files already present (idempotent).
        dst_dir.mkdir(parents=True, exist_ok=True)
        for img in paths:
            dst = dst_dir / f"{prefix}_{img.name}"
            if not dst.exists():
                shutil.copy2(img, dst)

    if args.real_source and Path(args.real_source).exists():
        imgs = [p for p in Path(args.real_source).rglob("*") if p.suffix.lower() in IMG_EXTS]
        rng.shuffle(imgs)  # shuffle once → disjoint slices below
        parts = {"train": imgs[:n_train], "val": imgs[n_train:n_train + n_val]}
        for split, subset in parts.items():
            _copy(subset, Path(args.output) / split / "real", "coco")
            log.info(f" real/{split}: {len(subset)} images")

    if args.fake_source and Path(args.fake_source).exists():
        imgs = [p for p in Path(args.fake_source).rglob("*") if p.suffix.lower() in IMG_EXTS]
        rng.shuffle(imgs)
        parts = {"train": imgs[:n_train], "val": imgs[n_train:n_train + n_val]}
        for split, subset in parts.items():
            _copy(subset, Path(args.output) / split / "fake", "xgen")
            log.info(f" fake/{split}: {len(subset)} images (generator: stable_diffusion/xl)")

    log.info("COCO-XGen preparation complete.")
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list; ``None`` falls back to
            ``sys.argv[1:]``. Accepting it makes the parser unit-testable.

    Returns:
        argparse.Namespace with real_source, fake_source, output, max, seed.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--real_source", default=None)
    p.add_argument("--fake_source", default=None)
    p.add_argument("--output", default="/kaggle/working/processed/fingerprint")
    p.add_argument("--max", type=int, default=15000)
    p.add_argument("--seed", type=int, default=42)
    return p.parse_args(argv)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
|
scripts/prepare_hf_source.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/prepare_hf_source.py
|
| 3 |
+
|
| 4 |
+
Downloads and prepares datasets from HuggingFace Hub for fingerprint engine training.
|
| 5 |
+
Requires: pip install datasets huggingface_hub
|
| 6 |
+
|
| 7 |
+
Kaggle usage (internet must be enabled in notebook settings):
|
| 8 |
+
!pip install datasets huggingface_hub -q
|
| 9 |
+
!python scripts/prepare_hf_source.py \
|
| 10 |
+
--dataset elsaEU/ELSA_1M \
|
| 11 |
+
--split train \
|
| 12 |
+
--output /kaggle/working/processed/fingerprint \
|
| 13 |
+
--max 10000
|
| 14 |
+
"""
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import argparse
|
| 18 |
+
import logging
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
|
| 21 |
+
# Module-wide logger; all progress/warning output goes through it.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
log = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def main(args: argparse.Namespace) -> None:
    """Stream a HuggingFace image dataset and dump samples into train/{real,fake}.

    Images are saved as JPEG under ``<output>/train/real`` or ``.../fake``
    depending on the sample's label field; stops after ``args.max`` new saves.
    """
    try:
        from datasets import load_dataset
    except ImportError:
        log.error("datasets not installed. Run: pip install datasets")
        return

    log.info(f"Loading {args.dataset} (split={args.split}, max={args.max}) ...")

    try:
        stream = load_dataset(args.dataset, split=args.split, streaming=True)
    except Exception as e:
        log.error(f"Failed to load dataset: {e}")
        return

    root = Path(args.output) / "train"
    class_dirs = {False: root / "real", True: root / "fake"}
    for class_dir in class_dirs.values():
        class_dir.mkdir(parents=True, exist_ok=True)

    n_saved = 0
    for idx, sample in enumerate(stream):
        if n_saved >= args.max:
            break

        # Datasets vary in column naming: try "image" then "img" for the
        # pixels, and "label" then "is_fake" for the class (default 0 = real).
        img = sample.get("image") or sample.get("img")
        label = sample.get("label", sample.get("is_fake", 0))

        if img is None:
            continue

        dst = class_dirs[bool(label)] / f"hf_{idx:07d}.jpg"
        if dst.exists():
            continue
        img.save(dst, format="JPEG", quality=95)
        n_saved += 1

        if n_saved % 1000 == 0:
            log.info(f" Saved {n_saved}/{args.max} ...")

    log.info(f"HuggingFace source preparation complete. {n_saved} images saved.")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list; ``None`` falls back to
            ``sys.argv[1:]``. Accepting it makes the parser unit-testable.

    Returns:
        argparse.Namespace with dataset, split, output and max.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--dataset", default="elsaEU/ELSA_1M")
    p.add_argument("--split", default="train")
    p.add_argument("--output", default="/kaggle/working/processed/fingerprint")
    p.add_argument("--max", type=int, default=10000)
    return p.parse_args(argv)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
|
scripts/prepare_ifakefakedb.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
scripts/prepare_ifakefakedb.py
|
| 3 |
+
|
| 4 |
+
Prepares the iFakeFaceDB dataset for fingerprint engine training.
|
| 5 |
+
Kaggle slug: tapakah68/artificial-faces-dataset or similar.
|
| 6 |
+
|
| 7 |
+
iFakeFaceDB contains ~87k StyleGAN-generated fake faces, useful for
|
| 8 |
+
increasing unknown_gan class coverage.
|
| 9 |
+
|
| 10 |
+
Kaggle usage:
|
| 11 |
+
!python scripts/prepare_ifakefakedb.py \
|
| 12 |
+
--source /kaggle/input/artificial-faces-dataset \
|
| 13 |
+
--output /kaggle/working/processed/fingerprint \
|
| 14 |
+
--max 20000
|
| 15 |
+
"""
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import logging
|
| 20 |
+
import random
|
| 21 |
+
import shutil
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
# Module-wide logger; all progress/warning output goes through it.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
log = logging.getLogger(__name__)

# Lower-cased image extensions accepted when scanning source directories.
IMG_EXTS = {".jpg", ".jpeg", ".png"}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def main(args: argparse.Namespace) -> None:
    """Copy a random subset of iFakeFaceDB images into train/val ``fake`` dirs.

    All images are labeled fake (StyleGAN-generated); an 85/15 train/val
    split is applied after shuffling with the seeded RNG.
    """
    source = Path(args.source)
    if not source.exists():
        log.error(f"Source not found: {source}")
        return

    rng = random.Random(args.seed)
    candidates = [p for p in source.rglob("*") if p.suffix.lower() in IMG_EXTS]
    rng.shuffle(candidates)
    del candidates[args.max:]  # cap at --max after shuffling

    cut = int(len(candidates) * 0.85)  # 85% train, remainder val
    for split, subset in (("train", candidates[:cut]), ("val", candidates[cut:])):
        target = Path(args.output) / split / "fake"
        target.mkdir(parents=True, exist_ok=True)
        for src_img in subset:
            dst = target / f"ifake_{src_img.name}"
            if not dst.exists():  # idempotent re-runs
                shutil.copy2(src_img, dst)
        log.info(f" {split}/fake: {len(subset)} images (generator: unknown_gan / StyleGAN)")

    log.info("iFakeFaceDB preparation complete.")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional explicit argument list; ``None`` falls back to
            ``sys.argv[1:]``. Accepting it makes the parser unit-testable.

    Returns:
        argparse.Namespace with source, output, max and seed.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--source", default="/kaggle/input/artificial-faces-dataset")
    p.add_argument("--output", default="/kaggle/working/processed/fingerprint")
    p.add_argument("--max", type=int, default=20000)
    p.add_argument("--seed", type=int, default=42)
    return p.parse_args(argv)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
|